diff --git a/.gitignore b/.gitignore index 9dacde0a4b2..e20396d2d05 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ *.symtypes *.order modules.builtin +*.rej *.elf *.bin *.gz diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml new file mode 100644 index 00000000000..85164016ed2 --- /dev/null +++ b/Documentation/DocBook/media/v4l/controls.xml @@ -0,0 +1,3366 @@ +
+ User Controls + + Devices typically have a number of user-settable controls +such as brightness, saturation and so on, which would be presented to +the user on a graphical user interface. But, different devices +will have different controls available, and furthermore, the range of +possible values, and the default value will vary from device to +device. The control ioctls provide the information and a mechanism to +create a nice user interface for these controls that will work +correctly with any device. + + All controls are accessed using an ID value. V4L2 defines +several IDs for specific purposes. Drivers can also implement their +own custom controls using V4L2_CID_PRIVATE_BASE +and higher values. The pre-defined control IDs have the prefix +V4L2_CID_, and are listed in . The ID is used when querying the attributes of +a control, and when getting or setting the current value. + + Generally applications should present controls to the user +without assumptions about their purpose. Each control comes with a +name string the user is supposed to understand. When the purpose is +non-intuitive the driver writer should provide a user manual, a user +interface plug-in or a driver specific panel application. Predefined +IDs were introduced to change a few controls programmatically, for +example to mute a device during a channel switch. + + Drivers may enumerate different controls after switching +the current video input or output, tuner or modulator, or audio input +or output. Different in the sense of other bounds, another default and +current value, step size or other menu items. A control with a certain +custom ID can also change name and +type. + It will be more convenient for applications if drivers +make use of the V4L2_CTRL_FLAG_DISABLED flag, but +that was never required. + Control values are stored globally, they do not +change when switching except to stay within the reported bounds. They +also do not change ⪚ when the device is opened or closed, when the +tuner radio frequency is changed or generally never without +application request. Since V4L2 specifies no event mechanism, panel +applications intended to cooperate with other panel applications (be +they built into a larger application, as a TV viewer) may need to +regularly poll control values to update their user +interface. + Applications could call an ioctl to request events. +After another process called &VIDIOC-S-CTRL; or another ioctl changing +shared properties the &func-select; function would indicate +readability until any ioctl (querying the properties) is +called. + + + + All controls use machine endianness. + + + + Control IDs + + &cs-def; + + + ID + Type + Description + + + + + V4L2_CID_BASE + + First predefined ID, equal to +V4L2_CID_BRIGHTNESS. + + + V4L2_CID_USER_BASE + + Synonym of V4L2_CID_BASE. + + + V4L2_CID_BRIGHTNESS + integer + Picture brightness, or more precisely, the black +level. + + + V4L2_CID_CONTRAST + integer + Picture contrast or luma gain. + + + V4L2_CID_SATURATION + integer + Picture color saturation or chroma gain. + + + V4L2_CID_HUE + integer + Hue or color balance. + + + V4L2_CID_AUDIO_VOLUME + integer + Overall audio volume. Note some drivers also +provide an OSS or ALSA mixer interface. + + + V4L2_CID_AUDIO_BALANCE + integer + Audio stereo balance. Minimum corresponds to all +the way left, maximum to right. + + + V4L2_CID_AUDIO_BASS + integer + Audio bass adjustment. + + + V4L2_CID_AUDIO_TREBLE + integer + Audio treble adjustment. 
+ + + V4L2_CID_AUDIO_MUTE + boolean + Mute audio, &ie; set the volume to zero, however +without affecting V4L2_CID_AUDIO_VOLUME. Like +ALSA drivers, V4L2 drivers must mute at load time to avoid excessive +noise. Actually the entire device should be reset to a low power +consumption state. + + + V4L2_CID_AUDIO_LOUDNESS + boolean + Loudness mode (bass boost). + + + V4L2_CID_BLACK_LEVEL + integer + Another name for brightness (not a synonym of +V4L2_CID_BRIGHTNESS). This control is deprecated +and should not be used in new drivers and applications. + + + V4L2_CID_AUTO_WHITE_BALANCE + boolean + Automatic white balance (cameras). + + + V4L2_CID_DO_WHITE_BALANCE + button + This is an action control. When set (the value is +ignored), the device will do a white balance and then hold the current +setting. Contrast this with the boolean +V4L2_CID_AUTO_WHITE_BALANCE, which, when +activated, keeps adjusting the white balance. + + + V4L2_CID_RED_BALANCE + integer + Red chroma balance. + + + V4L2_CID_BLUE_BALANCE + integer + Blue chroma balance. + + + V4L2_CID_GAMMA + integer + Gamma adjust. + + + V4L2_CID_WHITENESS + integer + Whiteness for grey-scale devices. This is a synonym +for V4L2_CID_GAMMA. This control is deprecated +and should not be used in new drivers and applications. + + + V4L2_CID_EXPOSURE + integer + Exposure (cameras). [Unit?] + + + V4L2_CID_AUTOGAIN + boolean + Automatic gain/exposure control. + + + V4L2_CID_GAIN + integer + Gain control. + + + V4L2_CID_HFLIP + boolean + Mirror the picture horizontally. + + + V4L2_CID_VFLIP + boolean + Mirror the picture vertically. + + + V4L2_CID_HCENTER_DEPRECATED (formerly V4L2_CID_HCENTER) + integer + Horizontal image centering. This control is +deprecated. New drivers and applications should use the Camera class controls +V4L2_CID_PAN_ABSOLUTE, +V4L2_CID_PAN_RELATIVE and +V4L2_CID_PAN_RESET instead. + + + V4L2_CID_VCENTER_DEPRECATED + (formerly V4L2_CID_VCENTER) + integer + Vertical image centering. Centering is intended to +physically adjust cameras. For image cropping see +, for clipping . This +control is deprecated. New drivers and applications should use the +Camera class controls +V4L2_CID_TILT_ABSOLUTE, +V4L2_CID_TILT_RELATIVE and +V4L2_CID_TILT_RESET instead. + + + V4L2_CID_POWER_LINE_FREQUENCY + enum + Enables a power line frequency filter to avoid +flicker. Possible values for enum v4l2_power_line_frequency are: +V4L2_CID_POWER_LINE_FREQUENCY_DISABLED (0), +V4L2_CID_POWER_LINE_FREQUENCY_50HZ (1) and +V4L2_CID_POWER_LINE_FREQUENCY_60HZ (2). + + + V4L2_CID_HUE_AUTO + boolean + Enables automatic hue control by the device. The +effect of setting V4L2_CID_HUE while automatic +hue control is enabled is undefined, drivers should ignore such +request. + + + V4L2_CID_WHITE_BALANCE_TEMPERATURE + integer + This control specifies the white balance settings +as a color temperature in Kelvin. A driver should have a minimum of +2800 (incandescent) to 6500 (daylight). For more information about +color temperature see Wikipedia. + + + V4L2_CID_SHARPNESS + integer + Adjusts the sharpness filters in a camera. The +minimum value disables the filters, higher values give a sharper +picture. + + + V4L2_CID_BACKLIGHT_COMPENSATION + integer + Adjusts the backlight compensation in a camera. The +minimum value disables backlight compensation. + + + V4L2_CID_CHROMA_AGC + boolean + Chroma automatic gain control. + + + V4L2_CID_CHROMA_GAIN + integer + Adjusts the Chroma gain control (for use when chroma AGC + is disabled). 
+ + + V4L2_CID_COLOR_KILLER + boolean + Enable the color killer (&ie; force a black & white image in case of a weak video signal). + + + V4L2_CID_COLORFX + enum + Selects a color effect. Possible values for +enum v4l2_colorfx are: +V4L2_COLORFX_NONE (0), +V4L2_COLORFX_BW (1), +V4L2_COLORFX_SEPIA (2), +V4L2_COLORFX_NEGATIVE (3), +V4L2_COLORFX_EMBOSS (4), +V4L2_COLORFX_SKETCH (5), +V4L2_COLORFX_SKY_BLUE (6), +V4L2_COLORFX_GRASS_GREEN (7), +V4L2_COLORFX_SKIN_WHITEN (8) and +V4L2_COLORFX_VIVID (9). + + + V4L2_CID_ROTATE + integer + Rotates the image by specified angle. Common angles are 90, + 270 and 180. Rotating the image to 90 and 270 will reverse the height + and width of the display window. It is necessary to set the new height and + width of the picture using the &VIDIOC-S-FMT; ioctl according to + the rotation angle selected. + + + V4L2_CID_BG_COLOR + integer + Sets the background color on the current output device. + Background color needs to be specified in the RGB24 format. The + supplied 32 bit value is interpreted as bits 0-7 Red color information, + bits 8-15 Green color information, bits 16-23 Blue color + information and bits 24-31 must be zero. + + + V4L2_CID_ILLUMINATORS_1 + V4L2_CID_ILLUMINATORS_2 + boolean + Switch on or off the illuminator 1 or 2 of the device + (usually a microscope). + + + V4L2_CID_LASTP1 + + End of the predefined control IDs (currently +V4L2_CID_ILLUMINATORS_2 + 1). + + + V4L2_CID_MIN_BUFFERS_FOR_CAPTURE + integer + This is a read-only control that can be read by the application +and used as a hint to determine the number of CAPTURE buffers to pass to REQBUFS. +The value is the minimum number of CAPTURE buffers that is necessary for hardware +to work. + + + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT + integer + This is a read-only control that can be read by the application +and used as a hint to determine the number of OUTPUT buffers to pass to REQBUFS. +The value is the minimum number of OUTPUT buffers that is necessary for hardware +to work. + + + V4L2_CID_PRIVATE_BASE + + ID of the first custom (driver specific) control. +Applications depending on particular custom controls should check the +driver name and version, see . + + + +
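As a brief illustration of the V4L2_CID_MIN_BUFFERS_FOR_CAPTURE hint described above, the following sketch (same fragment style as the examples below; fd is an open capture device, error handling is trimmed, and the extra headroom of two buffers is an arbitrary application choice, not something the API mandates) reads the hint and uses it when requesting memory-mapped capture buffers:

struct v4l2_control ctrl;
struct v4l2_requestbuffers reqbuf;
unsigned int count = 4; /* fallback when the driver offers no hint */

memset (&ctrl, 0, sizeof (ctrl));
ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
if (0 == ioctl (fd, VIDIOC_G_CTRL, &ctrl))
        count = ctrl.value + 2; /* driver minimum plus some headroom */

memset (&reqbuf, 0, sizeof (reqbuf));
reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
reqbuf.memory = V4L2_MEMORY_MMAP;
reqbuf.count = count;

if (-1 == ioctl (fd, VIDIOC_REQBUFS, &reqbuf))
        perror ("VIDIOC_REQBUFS");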
+ + Applications can enumerate the available controls with the +&VIDIOC-QUERYCTRL; and &VIDIOC-QUERYMENU; ioctls, get and set a +control value with the &VIDIOC-G-CTRL; and &VIDIOC-S-CTRL; ioctls. +Drivers must implement VIDIOC_QUERYCTRL, +VIDIOC_G_CTRL and +VIDIOC_S_CTRL when the device has one or more +controls, VIDIOC_QUERYMENU when it has one or +more menu type controls. + + + Enumerating all controls + + +&v4l2-queryctrl; queryctrl; +&v4l2-querymenu; querymenu; + +static void +enumerate_menu (void) +{ + printf (" Menu items:\n"); + + memset (&querymenu, 0, sizeof (querymenu)); + querymenu.id = queryctrl.id; + + for (querymenu.index = queryctrl.minimum; + querymenu.index <= queryctrl.maximum; + querymenu.index++) { + if (0 == ioctl (fd, &VIDIOC-QUERYMENU;, &querymenu)) { + printf (" %s\n", querymenu.name); + } + } +} + +memset (&queryctrl, 0, sizeof (queryctrl)); + +for (queryctrl.id = V4L2_CID_BASE; + queryctrl.id < V4L2_CID_LASTP1; + queryctrl.id++) { + if (0 == ioctl (fd, &VIDIOC-QUERYCTRL;, &queryctrl)) { + if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED) + continue; + + printf ("Control %s\n", queryctrl.name); + + if (queryctrl.type == V4L2_CTRL_TYPE_MENU) + enumerate_menu (); + } else { + if (errno == EINVAL) + continue; + + perror ("VIDIOC_QUERYCTRL"); + exit (EXIT_FAILURE); + } +} + +for (queryctrl.id = V4L2_CID_PRIVATE_BASE;; + queryctrl.id++) { + if (0 == ioctl (fd, &VIDIOC-QUERYCTRL;, &queryctrl)) { + if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED) + continue; + + printf ("Control %s\n", queryctrl.name); + + if (queryctrl.type == V4L2_CTRL_TYPE_MENU) + enumerate_menu (); + } else { + if (errno == EINVAL) + break; + + perror ("VIDIOC_QUERYCTRL"); + exit (EXIT_FAILURE); + } +} + + + + + Changing controls + + +&v4l2-queryctrl; queryctrl; +&v4l2-control; control; + +memset (&queryctrl, 0, sizeof (queryctrl)); +queryctrl.id = V4L2_CID_BRIGHTNESS; + +if (-1 == ioctl (fd, &VIDIOC-QUERYCTRL;, &queryctrl)) { + if (errno != EINVAL) { + perror ("VIDIOC_QUERYCTRL"); + exit (EXIT_FAILURE); + } else { + printf ("V4L2_CID_BRIGHTNESS is not supported\n"); + } +} else if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED) { + printf ("V4L2_CID_BRIGHTNESS is not supported\n"); +} else { + memset (&control, 0, sizeof (control)); + control.id = V4L2_CID_BRIGHTNESS; + control.value = queryctrl.default_value; + + if (-1 == ioctl (fd, &VIDIOC-S-CTRL;, &control)) { + perror ("VIDIOC_S_CTRL"); + exit (EXIT_FAILURE); + } +} + +memset (&control, 0, sizeof (control)); +control.id = V4L2_CID_CONTRAST; + +if (0 == ioctl (fd, &VIDIOC-G-CTRL;, &control)) { + control.value += 1; + + /* The driver may clamp the value or return ERANGE, ignored here */ + + if (-1 == ioctl (fd, &VIDIOC-S-CTRL;, &control) + && errno != ERANGE) { + perror ("VIDIOC_S_CTRL"); + exit (EXIT_FAILURE); + } +/* Ignore if V4L2_CID_CONTRAST is unsupported */ +} else if (errno != EINVAL) { + perror ("VIDIOC_G_CTRL"); + exit (EXIT_FAILURE); +} + +control.id = V4L2_CID_AUDIO_MUTE; +control.value = TRUE; /* silence */ + +/* Errors ignored */ +ioctl (fd, VIDIOC_S_CTRL, &control); + + +
+ +
+ Extended Controls + +
+ Introduction + + The control mechanism as originally designed was meant +to be used for user settings (brightness, saturation, etc.). However, +it turned out to be a very useful model for implementing more +complicated driver APIs where each driver implements only a subset of +a larger API. + + The MPEG encoding API was the driving force behind +designing and implementing this extended control mechanism: the MPEG +standard is quite large and the currently supported hardware MPEG +encoders each only implement a subset of this standard. Furthermore, +many parameters relating to how the video is encoded into an MPEG +stream are specific to the MPEG encoding chip since the MPEG standard +only defines the format of the resulting MPEG stream, not how the +video is actually encoded into that format. + + Unfortunately, the original control API lacked some +features needed for these new uses and so it was extended into the +(not terribly originally named) extended control API. + + Even though the MPEG encoding API was the first effort +to use the Extended Control API, nowadays there are also other classes +of Extended Controls, such as Camera Controls and FM Transmitter Controls. +The Extended Controls API as well as all Extended Controls classes are +described in the following text.
+ +
+ The Extended Control API + + Three new ioctls are available: &VIDIOC-G-EXT-CTRLS;, +&VIDIOC-S-EXT-CTRLS; and &VIDIOC-TRY-EXT-CTRLS;. These ioctls act on +arrays of controls (as opposed to the &VIDIOC-G-CTRL; and +&VIDIOC-S-CTRL; ioctls that act on a single control). This is needed +since it is often required to atomically change several controls at +once. + + Each of the new ioctls expects a pointer to a +&v4l2-ext-controls;. This structure contains a pointer to the control +array, a count of the number of controls in that array and a control +class. Control classes are used to group similar controls into a +single class. For example, control class +V4L2_CTRL_CLASS_USER contains all user controls +(&ie; all controls that can also be set using the old +VIDIOC_S_CTRL ioctl). Control class +V4L2_CTRL_CLASS_MPEG contains all controls +relating to MPEG encoding, etc. + + All controls in the control array must belong to the +specified control class. An error is returned if this is not the +case. + + It is also possible to use an empty control array (count +== 0) to check whether the specified control class is +supported. + + The control array is a &v4l2-ext-control; array. The +v4l2_ext_control structure is very similar to +&v4l2-control;, except for the fact that it also allows for 64-bit +values and pointers to be passed. + + It is important to realize that due to the flexibility of +controls it is necessary to check whether the control you want to set +actually is supported in the driver and what the valid range of values +is. So use the &VIDIOC-QUERYCTRL; and &VIDIOC-QUERYMENU; ioctls to +check this. Also note that it is possible that some of the menu +indices in a control of type V4L2_CTRL_TYPE_MENU +may not be supported (VIDIOC_QUERYMENU will +return an error). A good example is the list of supported MPEG audio +bitrates. Some drivers only support one or two bitrates, others +support a wider range. + + + All controls use machine endianness. + +
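To illustrate both points, here is a sketch in the same fragment style as the earlier examples (fd is an open device; the bitrate value is purely illustrative and should be validated with VIDIOC_QUERYCTRL first). The empty array probes whether the MPEG control class is supported at all, and the second call then sets two related controls atomically:

struct v4l2_ext_control ctrl[2];
struct v4l2_ext_controls parm;

/* An empty control array (count == 0) merely checks whether the
   MPEG control class is supported by this driver. */
memset (&parm, 0, sizeof (parm));
parm.ctrl_class = V4L2_CTRL_CLASS_MPEG;
parm.count = 0;

if (0 == ioctl (fd, VIDIOC_TRY_EXT_CTRLS, &parm)) {
        /* Set two related controls in one atomic operation. */
        memset (ctrl, 0, sizeof (ctrl));
        ctrl[0].id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
        ctrl[0].value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR;
        ctrl[1].id = V4L2_CID_MPEG_VIDEO_BITRATE;
        ctrl[1].value = 6000000; /* 6 Mbit/s, example value only */

        parm.count = 2;
        parm.controls = ctrl;

        /* On failure parm.error_idx indicates which control was rejected. */
        if (-1 == ioctl (fd, VIDIOC_S_EXT_CTRLS, &parm))
                perror ("VIDIOC_S_EXT_CTRLS");
}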
+ +
+ Enumerating Extended Controls + + The recommended way to enumerate over the extended +controls is by using &VIDIOC-QUERYCTRL; in combination with the +V4L2_CTRL_FLAG_NEXT_CTRL flag: + + + +&v4l2-queryctrl; qctrl; + +qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL; +while (0 == ioctl (fd, &VIDIOC-QUERYCTRL;, &qctrl)) { + /* ... */ + qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL; +} + + + + The initial control ID is set to 0 ORed with the +V4L2_CTRL_FLAG_NEXT_CTRL flag. The +VIDIOC_QUERYCTRL ioctl will return the first +control with a higher ID than the specified one. When no such controls +are found an error is returned. + + If you want to get all controls within a specific control +class, then you can set the initial +qctrl.id value to the control class and add +an extra check to break out of the loop when a control of another +control class is found: + + + +qctrl.id = V4L2_CTRL_CLASS_MPEG | V4L2_CTRL_FLAG_NEXT_CTRL; +while (0 == ioctl (fd, &VIDIOC-QUERYCTRL;, &qctrl)) { + if (V4L2_CTRL_ID2CLASS (qctrl.id) != V4L2_CTRL_CLASS_MPEG) + break; + /* ... */ + qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL; + } + + + + The 32-bit qctrl.id value is +subdivided into three bit ranges: the top 4 bits are reserved for +flags (⪚ V4L2_CTRL_FLAG_NEXT_CTRL) and are not +actually part of the ID. The remaining 28 bits form the control ID, of +which the most significant 12 bits define the control class and the +least significant 16 bits identify the control within the control +class. It is guaranteed that these last 16 bits are always non-zero +for controls. The range of 0x1000 and up are reserved for +driver-specific controls. The macro +V4L2_CTRL_ID2CLASS(id) returns the control class +ID based on a control ID. + + If the driver does not support extended controls, then +VIDIOC_QUERYCTRL will fail when used in +combination with V4L2_CTRL_FLAG_NEXT_CTRL. In +that case the old method of enumerating control should be used (see +1.8). But if it is supported, then it is guaranteed to enumerate over +all controls, including driver-private controls. +
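The fallback described in the last paragraph could be structured roughly as follows (a sketch only; the old-style loops are abbreviated here and shown in full in the user controls example earlier):

struct v4l2_queryctrl qctrl;

memset (&qctrl, 0, sizeof (qctrl));
qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;

if (0 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl)) {
        /* Extended enumeration works: walk every control, including
           driver-private ones, until QUERYCTRL fails. */
        do {
                if (!(qctrl.flags & V4L2_CTRL_FLAG_DISABLED))
                        printf ("Control %s\n", qctrl.name);
                qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
        } while (0 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl));
} else {
        /* No V4L2_CTRL_FLAG_NEXT_CTRL support: fall back to probing
           V4L2_CID_BASE..V4L2_CID_LASTP1 and then V4L2_CID_PRIVATE_BASE
           and up, exactly as in the user controls example. */
        for (qctrl.id = V4L2_CID_BASE;
             qctrl.id < V4L2_CID_LASTP1; qctrl.id++) {
                if (0 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl) &&
                    !(qctrl.flags & V4L2_CTRL_FLAG_DISABLED))
                        printf ("Control %s\n", qctrl.name);
        }
}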
+ +
+ Creating Control Panels + + It is possible to create control panels for a graphical +user interface where the user can select the various controls. +Basically you will have to iterate over all controls using the method +described above. Each control class starts with a control of type +V4L2_CTRL_TYPE_CTRL_CLASS. +VIDIOC_QUERYCTRL will return the name of this +control class which can be used as the title of a tab page within a +control panel. + + The flags field of &v4l2-queryctrl; also contains hints on +the behavior of the control. See the &VIDIOC-QUERYCTRL; documentation +for more details. +
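A very small sketch of that structure, simply printing a heading whenever a control class descriptor is encountered (a real panel would create a tab page and widgets here instead):

struct v4l2_queryctrl qctrl;

memset (&qctrl, 0, sizeof (qctrl));
qctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;

while (0 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl)) {
        if (qctrl.type == V4L2_CTRL_TYPE_CTRL_CLASS)
                /* Class descriptor: use its name as the tab title. */
                printf ("== %s ==\n", qctrl.name);
        else if (!(qctrl.flags & V4L2_CTRL_FLAG_DISABLED))
                /* Ordinary control: a real GUI would add a widget here and
                   honour hints such as V4L2_CTRL_FLAG_READ_ONLY. */
                printf ("    %s\n", qctrl.name);
        qctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
}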
+ +
+ MPEG Control Reference + + Below all controls within the MPEG control class are +described. First the generic controls, then controls specific to +certain hardware. +
+ Generic MPEG Controls + + + MPEG Control IDs + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_MPEG_CLASS  + class + The MPEG class +descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a +description of this control class. This description can be used as the +caption of a Tab page in a GUI, for example. + + + + V4L2_CID_MPEG_STREAM_TYPE  + enum v4l2_mpeg_stream_type + The MPEG-1, -2 or -4 +output stream type. One cannot assume anything here. Each hardware +MPEG encoder tends to support different subsets of the available MPEG +stream types. This control is specific to multiplexed MPEG streams. +The currently defined stream types are: + + + + + + V4L2_MPEG_STREAM_TYPE_MPEG2_PS  + MPEG-2 program stream + + + V4L2_MPEG_STREAM_TYPE_MPEG2_TS  + MPEG-2 transport stream + + + V4L2_MPEG_STREAM_TYPE_MPEG1_SS  + MPEG-1 system stream + + + V4L2_MPEG_STREAM_TYPE_MPEG2_DVD  + MPEG-2 DVD-compatible stream + + + V4L2_MPEG_STREAM_TYPE_MPEG1_VCD  + MPEG-1 VCD-compatible stream + + + V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD  + MPEG-2 SVCD-compatible stream + + + + + + + V4L2_CID_MPEG_STREAM_PID_PMT  + integer + Program Map Table +Packet ID for the MPEG transport stream (default 16) + + + + V4L2_CID_MPEG_STREAM_PID_AUDIO  + integer + Audio Packet ID for +the MPEG transport stream (default 256) + + + + V4L2_CID_MPEG_STREAM_PID_VIDEO  + integer + Video Packet ID for +the MPEG transport stream (default 260) + + + + V4L2_CID_MPEG_STREAM_PID_PCR  + integer + Packet ID for the +MPEG transport stream carrying PCR fields (default 259) + + + + V4L2_CID_MPEG_STREAM_PES_ID_AUDIO  + integer + Audio ID for MPEG +PES + + + + V4L2_CID_MPEG_STREAM_PES_ID_VIDEO  + integer + Video ID for MPEG +PES + + + + V4L2_CID_MPEG_STREAM_VBI_FMT  + enum v4l2_mpeg_stream_vbi_fmt + Some cards can embed +VBI data (⪚ Closed Caption, Teletext) into the MPEG stream. This +control selects whether VBI data should be embedded, and if so, what +embedding method should be used. The list of possible VBI formats +depends on the driver. The currently defined VBI format types +are: + + + + + + V4L2_MPEG_STREAM_VBI_FMT_NONE  + No VBI in the MPEG stream + + + V4L2_MPEG_STREAM_VBI_FMT_IVTV  + VBI in private packets, IVTV format (documented +in the kernel sources in the file Documentation/video4linux/cx2341x/README.vbi) + + + + + + + V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ  + enum v4l2_mpeg_audio_sampling_freq + MPEG Audio sampling +frequency. Possible values are: + + + + + + V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100  + 44.1 kHz + + + V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000  + 48 kHz + + + V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000  + 32 kHz + + + + + + + V4L2_CID_MPEG_AUDIO_ENCODING  + enum v4l2_mpeg_audio_encoding + MPEG Audio encoding. +This control is specific to multiplexed MPEG streams. +Possible values are: + + + + + + V4L2_MPEG_AUDIO_ENCODING_LAYER_1  + MPEG-1/2 Layer I encoding + + + V4L2_MPEG_AUDIO_ENCODING_LAYER_2  + MPEG-1/2 Layer II encoding + + + V4L2_MPEG_AUDIO_ENCODING_LAYER_3  + MPEG-1/2 Layer III encoding + + + V4L2_MPEG_AUDIO_ENCODING_AAC  + MPEG-2/4 AAC (Advanced Audio Coding) + + + V4L2_MPEG_AUDIO_ENCODING_AC3  + AC-3 aka ATSC A/52 encoding + + + + + + + V4L2_CID_MPEG_AUDIO_L1_BITRATE  + enum v4l2_mpeg_audio_l1_bitrate + MPEG-1/2 Layer I bitrate. 
+Possible values are: + + + + + + V4L2_MPEG_AUDIO_L1_BITRATE_32K  + 32 kbit/s + + V4L2_MPEG_AUDIO_L1_BITRATE_64K  + 64 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_96K  + 96 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_128K  + 128 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_160K  + 160 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_192K  + 192 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_224K  + 224 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_256K  + 256 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_288K  + 288 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_320K  + 320 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_352K  + 352 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_384K  + 384 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_416K  + 416 kbit/s + + + V4L2_MPEG_AUDIO_L1_BITRATE_448K  + 448 kbit/s + + + + + + + V4L2_CID_MPEG_AUDIO_L2_BITRATE  + enum v4l2_mpeg_audio_l2_bitrate + MPEG-1/2 Layer II bitrate. +Possible values are: + + + + + + V4L2_MPEG_AUDIO_L2_BITRATE_32K  + 32 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_48K  + 48 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_56K  + 56 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_64K  + 64 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_80K  + 80 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_96K  + 96 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_112K  + 112 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_128K  + 128 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_160K  + 160 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_192K  + 192 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_224K  + 224 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_256K  + 256 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_320K  + 320 kbit/s + + + V4L2_MPEG_AUDIO_L2_BITRATE_384K  + 384 kbit/s + + + + + + + V4L2_CID_MPEG_AUDIO_L3_BITRATE  + enum v4l2_mpeg_audio_l3_bitrate + MPEG-1/2 Layer III bitrate. +Possible values are: + + + + + + V4L2_MPEG_AUDIO_L3_BITRATE_32K  + 32 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_40K  + 40 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_48K  + 48 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_56K  + 56 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_64K  + 64 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_80K  + 80 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_96K  + 96 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_112K  + 112 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_128K  + 128 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_160K  + 160 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_192K  + 192 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_224K  + 224 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_256K  + 256 kbit/s + + + V4L2_MPEG_AUDIO_L3_BITRATE_320K  + 320 kbit/s + + + + + + + V4L2_CID_MPEG_AUDIO_AAC_BITRATE  + integer + AAC bitrate in bits per second. + + + + V4L2_CID_MPEG_AUDIO_AC3_BITRATE  + enum v4l2_mpeg_audio_ac3_bitrate + AC-3 bitrate. 
+Possible values are: + + + + + + V4L2_MPEG_AUDIO_AC3_BITRATE_32K  + 32 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_40K  + 40 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_48K  + 48 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_56K  + 56 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_64K  + 64 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_80K  + 80 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_96K  + 96 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_112K  + 112 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_128K  + 128 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_160K  + 160 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_192K  + 192 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_224K  + 224 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_256K  + 256 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_320K  + 320 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_384K  + 384 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_448K  + 448 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_512K  + 512 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_576K  + 576 kbit/s + + + V4L2_MPEG_AUDIO_AC3_BITRATE_640K  + 640 kbit/s + + + + + + + V4L2_CID_MPEG_AUDIO_MODE  + enum v4l2_mpeg_audio_mode + MPEG Audio mode. +Possible values are: + + + + + + V4L2_MPEG_AUDIO_MODE_STEREO  + Stereo + + + V4L2_MPEG_AUDIO_MODE_JOINT_STEREO  + Joint Stereo + + + V4L2_MPEG_AUDIO_MODE_DUAL  + Bilingual + + + V4L2_MPEG_AUDIO_MODE_MONO  + Mono + + + + + + + V4L2_CID_MPEG_AUDIO_MODE_EXTENSION  + enum v4l2_mpeg_audio_mode_extension + Joint Stereo +audio mode extension. In Layer I and II they indicate which subbands +are in intensity stereo. All other subbands are coded in stereo. Layer +III is not (yet) supported. Possible values +are: + + + + + + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4  + Subbands 4-31 in intensity stereo + + + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8  + Subbands 8-31 in intensity stereo + + + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12  + Subbands 12-31 in intensity stereo + + + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16  + Subbands 16-31 in intensity stereo + + + + + + + V4L2_CID_MPEG_AUDIO_EMPHASIS  + enum v4l2_mpeg_audio_emphasis + Audio Emphasis. +Possible values are: + + + + + + V4L2_MPEG_AUDIO_EMPHASIS_NONE  + None + + + V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS  + 50/15 microsecond emphasis + + + V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17  + CCITT J.17 + + + + + + + V4L2_CID_MPEG_AUDIO_CRC  + enum v4l2_mpeg_audio_crc + CRC method. Possible +values are: + + + + + + V4L2_MPEG_AUDIO_CRC_NONE  + None + + + V4L2_MPEG_AUDIO_CRC_CRC16  + 16 bit parity check + + + + + + + V4L2_CID_MPEG_AUDIO_MUTE  + boolean + Mutes the audio when +capturing. This is not done by muting audio hardware, which can still +produce a slight hiss, but in the encoder itself, guaranteeing a fixed +and reproducible audio bitstream. 0 = unmuted, 1 = muted. + + + + V4L2_CID_MPEG_VIDEO_ENCODING  + enum v4l2_mpeg_video_encoding + MPEG Video encoding +method. This control is specific to multiplexed MPEG streams. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_ENCODING_MPEG_1  + MPEG-1 Video encoding + + + V4L2_MPEG_VIDEO_ENCODING_MPEG_2  + MPEG-2 Video encoding + + + V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC  + MPEG-4 AVC (H.264) Video encoding + + + + + + + V4L2_CID_MPEG_VIDEO_ASPECT  + enum v4l2_mpeg_video_aspect + Video aspect. 
+Possible values are: + + + + + + V4L2_MPEG_VIDEO_ASPECT_1x1  + + + V4L2_MPEG_VIDEO_ASPECT_4x3  + + + V4L2_MPEG_VIDEO_ASPECT_16x9  + + + V4L2_MPEG_VIDEO_ASPECT_221x100  + + + + + + + V4L2_CID_MPEG_VIDEO_B_FRAMES  + integer + Number of B-Frames +(default 2) + + + + V4L2_CID_MPEG_VIDEO_GOP_SIZE  + integer + GOP size (default +12) + + + + V4L2_CID_MPEG_VIDEO_GOP_CLOSURE  + boolean + GOP closure (default +1) + + + + V4L2_CID_MPEG_VIDEO_PULLDOWN  + boolean + Enable 3:2 pulldown +(default 0) + + + + V4L2_CID_MPEG_VIDEO_BITRATE_MODE  + enum v4l2_mpeg_video_bitrate_mode + Video bitrate mode. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR  + Variable bitrate + + + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR  + Constant bitrate + + + + + + + V4L2_CID_MPEG_VIDEO_BITRATE  + integer + Video bitrate in bits +per second. + + + + V4L2_CID_MPEG_VIDEO_BITRATE_PEAK  + integer + Peak video bitrate in +bits per second. Must be larger or equal to the average video bitrate. +It is ignored if the video bitrate mode is set to constant +bitrate. + + + + V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION  + integer + For every captured +frame, skip this many subsequent frames (default 0). + + + + V4L2_CID_MPEG_VIDEO_MUTE  + boolean + + "Mutes" the video to a +fixed color when capturing. This is useful for testing, to produce a +fixed video bitstream. 0 = unmuted, 1 = muted. + + + + V4L2_CID_MPEG_VIDEO_MUTE_YUV  + integer + Sets the "mute" color +of the video. The supplied 32-bit integer is interpreted as follows (bit +0 = least significant bit): + + + + + + Bit 0:7 + V chrominance information + + + Bit 8:15 + U chrominance information + + + Bit 16:23 + Y luminance information + + + Bit 24:31 + Must be zero. + + + + + + + + + V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE  + boolean + + If enabled the decoder expects to receive a single slice per buffer, otherwise +the decoder expects a single frame in per buffer. Applicable to the decoder, all codecs. + + + + + + V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE  + boolean + + Enable writing sample aspect ratio in the Video Usability Information. +Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC  + enum v4l2_mpeg_video_h264_vui_sar_idc + + VUI sample aspect ratio indicator for H.264 encoding. The value +is defined in the table E-1 in the standard. Applicable to the H264 encoder. + + + + + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED  + Unspecified + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1  + 1x1 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11  + 12x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11  + 10x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11  + 16x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33  + 40x33 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11  + 24x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11  + 20x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11  + 32x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33  + 80x33 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11  + 18x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11  + 15x11 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33  + 64x33 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99  + 160x99 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3  + 4x3 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2  + 3x2 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1  + 2x1 + + + V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED  + Extended SAR + + + + + + + + V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH  + integer + + Extended sample aspect ratio width for H.264 VUI encoding. +Applicable to the H264 encoder. 
+ + + + + V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT  + integer + + Extended sample aspect ratio height for H.264 VUI encoding. +Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_LEVEL  + enum v4l2_mpeg_video_h264_level + + The level information for the H264 video elementary stream. +Applicable to the H264 encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_H264_LEVEL_1_0  + Level 1.0 + + + V4L2_MPEG_VIDEO_H264_LEVEL_1B  + Level 1B + + + V4L2_MPEG_VIDEO_H264_LEVEL_1_1  + Level 1.1 + + + V4L2_MPEG_VIDEO_H264_LEVEL_1_2  + Level 1.2 + + + V4L2_MPEG_VIDEO_H264_LEVEL_1_3  + Level 1.3 + + + V4L2_MPEG_VIDEO_H264_LEVEL_2_0  + Level 2.0 + + + V4L2_MPEG_VIDEO_H264_LEVEL_2_1  + Level 2.1 + + + V4L2_MPEG_VIDEO_H264_LEVEL_2_2  + Level 2.2 + + + V4L2_MPEG_VIDEO_H264_LEVEL_3_0  + Level 3.0 + + + V4L2_MPEG_VIDEO_H264_LEVEL_3_1  + Level 3.1 + + + V4L2_MPEG_VIDEO_H264_LEVEL_3_2  + Level 3.2 + + + V4L2_MPEG_VIDEO_H264_LEVEL_4_0  + Level 4.0 + + + V4L2_MPEG_VIDEO_H264_LEVEL_4_1  + Level 4.1 + + + V4L2_MPEG_VIDEO_H264_LEVEL_4_2  + Level 4.2 + + + V4L2_MPEG_VIDEO_H264_LEVEL_5_0  + Level 5.0 + + + V4L2_MPEG_VIDEO_H264_LEVEL_5_1  + Level 5.1 + + + + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL  + enum v4l2_mpeg_video_mpeg4_level + + The level information for the MPEG4 elementary stream. +Applicable to the MPEG4 encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_LEVEL_0  + Level 0 + + + V4L2_MPEG_VIDEO_LEVEL_0B  + Level 0b + + + V4L2_MPEG_VIDEO_LEVEL_1  + Level 1 + + + V4L2_MPEG_VIDEO_LEVEL_2  + Level 2 + + + V4L2_MPEG_VIDEO_LEVEL_3  + Level 3 + + + V4L2_MPEG_VIDEO_LEVEL_3B  + Level 3b + + + V4L2_MPEG_VIDEO_LEVEL_4  + Level 4 + + + V4L2_MPEG_VIDEO_LEVEL_5  + Level 5 + + + + + + + + V4L2_CID_MPEG_VIDEO_H264_PROFILE  + enum v4l2_mpeg_h264_profile + + The profile information for H264. +Applicable to the H264 encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE  + Baseline profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE  + Constrained Baseline profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_MAIN  + Main profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED  + Extended profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH  + High profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10  + High 10 profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422  + High 422 profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE  + High 444 Predictive profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA  + High 10 Intra profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA  + High 422 Intra profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA  + High 444 Intra profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA  + CAVLC 444 Intra profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE  + Scalable Baseline profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH  + Scalable High profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA  + Scalable High Intra profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH  + Stereo High profile + + + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH  + Multiview High profile + + + + + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE  + enum v4l2_mpeg_mpeg4_profile + + The profile information for MPEG4. +Applicable to the MPEG4 encoder. 
+Possible values are: + + + + + + V4L2_MPEG_VIDEO_PROFILE_SIMPLE  + Simple profile + + + V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE  + Advanced Simple profile + + + V4L2_MPEG_VIDEO_PROFILE_CORE  + Core profile + + + V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE  + Simple Scalable profile + + + V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY  + + + + + + + + + V4L2_CID_MPEG_VIDEO_MAX_REF_PIC  + integer + + The maximum number of reference pictures used for encoding. +Applicable to the encoder. + + + + + + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE  + enum v4l2_mpeg_multi_slice_mode + + Determines how the encoder should handle division of frame into slices. +Applicable to the encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE  + Single slice per frame. + + + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB  + Multiple slices with set maximum number of macroblocks per slice. + + + V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES  + Multiple slice with set maximum size in bytes per slice. + + + + + + + + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB  + integer + + The maximum number of macroblocks in a slice. Used when +V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE is set to V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB. +Applicable to the encoder. + + + + + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES  + integer + + The maximum size of a slice in bytes. Used when +V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE is set to V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES. +Applicable to the encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE  + enum v4l2_mpeg_h264_loop_filter_mode + + Loop filter mode for H264 encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED  + Loop filter is enabled. + + + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED  + Loop filter is disabled. + + + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY  + Loop filter is disabled at the slice boundary. + + + + + + + + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA  + integer + + Loop filter alpha coefficient, defined in the H264 standard. +Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA  + integer + + Loop filter beta coefficient, defined in the H264 standard. +Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE  + enum v4l2_mpeg_h264_symbol_mode + + Entropy coding mode for H264 - CABAC/CAVALC. +Applicable to the H264 encoder. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC  + Use CAVLC entropy coding. + + + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC  + Use CABAC entropy coding. + + + + + + + + V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM  + boolean + + Enable 8X8 transform for H264. Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB  + integer + + Cyclic intra macroblock refresh. This is the number of continuous macroblocks +refreshed every frame. Each frame a succesive set of macroblocks is refreshed until the cycle completes and starts from the +top of the frame. Applicable to H264, H263 and MPEG4 encoder. + + + + + V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE  + boolean + + Frame level rate control enable. +If this control is disabled then the quantization parameter for each frame type is constant and set with appropriate controls +(e.g. V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP). +If frame rate control is enabled then quantization parameter is adjusted to meet the chosen bitrate. Minimum and maximum value +for the quantization parameter can be set with appropriate controls (e.g. 
V4L2_CID_MPEG_VIDEO_H263_MIN_QP). +Applicable to encoders. + + + + + V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE  + boolean + + Macroblock level rate control enable. +Applicable to the MPEG4 and H264 encoders. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_QPEL  + boolean + + Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP  + integer + + Quantization parameter for an I frame for H263. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_H263_MIN_QP  + integer + + Minimum quantization parameter for H263. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_H263_MAX_QP  + integer + + Maximum quantization parameter for H263. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP  + integer + + Quantization parameter for an P frame for H263. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP  + integer + + Quantization parameter for an B frame for H263. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP  + integer + + Quantization parameter for an I frame for H264. Valid range: from 0 to 51. + + + + + V4L2_CID_MPEG_VIDEO_H264_MIN_QP  + integer + + Minimum quantization parameter for H264. Valid range: from 0 to 51. + + + + + V4L2_CID_MPEG_VIDEO_H264_MAX_QP  + integer + + Maximum quantization parameter for H264. Valid range: from 0 to 51. + + + + + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP  + integer + + Quantization parameter for an P frame for H264. Valid range: from 0 to 51. + + + + + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP  + integer + + Quantization parameter for an B frame for H264. Valid range: from 0 to 51. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP  + integer + + Quantization parameter for an I frame for MPEG4. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP  + integer + + Minimum quantization parameter for MPEG4. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP  + integer + + Maximum quantization parameter for MPEG4. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP  + integer + + Quantization parameter for an P frame for MPEG4. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP  + integer + + Quantization parameter for an B frame for MPEG4. Valid range: from 1 to 31. + + + + + V4L2_CID_MPEG_VIDEO_VBV_SIZE  + integer + + The Video Buffer Verifier size in kilobytes, it is used as a limitation of frame skip. +The VBV is defined in the standard as a mean to verify that the produced stream will be succesfully decoded. +The standard describes it as "Part of a hypothetical decoder that is conceptually connected to the +output of the encoder. Its purpose is to provide a constraint on the variability of the data rate that an +encoder or editing process may produce.". +Applicable to the MPEG1, MPEG2, MPEG4 encoders. + + + + + V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE  + integer + + The Coded Picture Buffer size in kilobytes, it is used as a limitation of frame skip. +The CPB is defined in the H264 standard as a mean to verify that the produced stream will be succesfully decoded. +Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_H264_I_PERIOD  + integer + + Period between I-frames in the open GOP for H264. In case of an open GOP +this is the period between two I-frames. The period between IDR (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE control. +An IDR frame, which stands for Instantaneous Decoding Refresh is an I-frame after which no prior frames are +referenced. 
This means that a stream can be restarted from an IDR frame without the need to store or decode any +previous frames. Applicable to the H264 encoder. + + + + + V4L2_CID_MPEG_VIDEO_HEADER_MODE  + enum v4l2_mpeg_header_mode + + Determines whether the header is returned as the first buffer or +together with the first frame. Applicable to encoders. +Possible values are: + + + + + + V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE  + The stream header is returned separately in the first buffer. + + + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME  + The stream header is returned together with the first encoded frame. + + + + + + + V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER  + boolean + Enables the deblocking post processing filter for the MPEG4 decoder. +Applicable to the MPEG4 decoder. + + + + V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES  + integer + vop_time_increment_resolution value for MPEG4. Applicable to the MPEG4 encoder. + + + + V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC  + integer + vop_time_increment value for MPEG4. Applicable to the MPEG4 encoder. + + + +
+
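Because hardware typically implements only a subset of the menu values listed above (the Layer II audio bitrates are a good example), an application should probe which entries actually exist before offering them to the user. A sketch, following the earlier enumerate_menu() pattern:

struct v4l2_queryctrl qctrl;
struct v4l2_querymenu qmenu;

memset (&qctrl, 0, sizeof (qctrl));
qctrl.id = V4L2_CID_MPEG_AUDIO_L2_BITRATE;

if (0 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl)) {
        memset (&qmenu, 0, sizeof (qmenu));
        qmenu.id = qctrl.id;

        for (qmenu.index = qctrl.minimum;
             qmenu.index <= qctrl.maximum;
             qmenu.index++) {
                /* QUERYMENU fails for indices this driver does not implement. */
                if (0 == ioctl (fd, VIDIOC_QUERYMENU, &qmenu))
                        printf ("Supported Layer II bitrate: %s\n", qmenu.name);
        }
}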
+ +
+ MFC 5.1 MPEG Controls + + The following MPEG class controls deal with MPEG +decoding and encoding settings that are specific to the Multi Format Codec 5.1 device present +in the S5P family of SoCs by Samsung. + + + + MFC 5.1 Control IDs + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE  + integer + If the display delay is enabled then the decoder has to return a +CAPTURE buffer after processing a certain number of OUTPUT buffers. If this number is low, then it may result in +buffers not being dequeued in display order. In addition hardware may still use those buffers as reference, thus +application should not write to those buffers. This feature can be used for example for generating thumbnails of videos. +Applicable to the H264 decoder. + + + + + V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY  + integer + Display delay value for H264 decoder. +The decoder is forced to return a decoded frame after the set 'display delay' number of frames. If this number is +low it may result in frames returned out of dispaly order, in addition the hardware may still be using the returned buffer +as a reference picture for subsequent frames. + + + + + V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P  + integer + The number of reference pictures used for encoding a P picture. +Applicable to the H264 encoder. + + + + V4L2_CID_MPEG_MFC51_VIDEO_PADDING  + boolean + Padding enable in the encoder - use a color instead of repeating border pixels. +Applicable to encoders. + + + + V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV  + integer + Padding color in the encoder. Applicable to encoders. The supplied 32-bit integer is interpreted as follows (bit +0 = least significant bit): + + + + + + Bit 0:7 + V chrominance information + + + Bit 8:15 + U chrominance information + + + Bit 16:23 + Y luminance information + + + Bit 24:31 + Must be zero. + + + + + + + V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF  + integer + Reaction coefficient for MFC rate control. Applicable to encoders. +Note 1: Valid only when the frame level RC is enabled. +Note 2: For tight CBR, this field must be small (ex. 2 ~ 10). +For VBR, this field must be large (ex. 100 ~ 1000). +Note 3: It is not recommended to use the greater number than FRAME_RATE * (10^9 / BIT_RATE). + + + + + V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK  + boolean + Adaptive rate control for dark region. +Valid only when H.264 and macroblock level RC is enabled (V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE). +Applicable to the H264 encoder. + + + + V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH  + boolean + Adaptive rate control for smooth region. +Valid only when H.264 and macroblock level RC is enabled (V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE). +Applicable to the H264 encoder. + + + + V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC  + boolean + Adaptive rate control for static region. +Valid only when H.264 and macroblock level RC is enabled (V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE). +Applicable to the H264 encoder. + + + + V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY  + boolean + Adaptive rate control for activity region. +Valid only when H.264 and macroblock level RC is enabled (V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE). +Applicable to the H264 encoder. + + + + V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE  + enum v4l2_mpeg_mfc51_frame_skip_mode + + +Indicates in what conditions the encoder should skip frames. If encoding a frame would cause the encoded stream to be larger then +a chosen data limit then the frame will be skipped. 
+Possible values are: + + + + + + V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED  + Frame skip mode is disabled. + + + V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT  + Frame skip mode enabled and buffer limit is set by the chosen level and is defined by the standard. + + + V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT  + Frame skip mode enabled and buffer limit is set by the VBV (MPEG1/2/4) or CPB (H264) buffer size control. + + + + + + + V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT  + integer + Enable rate-control with fixed target bit. +If this setting is enabled, then the rate control logic of the encoder will calculate the average bitrate +for a GOP and keep it below or equal to the set bitrate target. Otherwise the rate control logic calculates the +overall average bitrate for the stream and keeps it below or equal to the set bitrate. In the first case +the average bitrate for the whole stream will be smaller than the set bitrate. This is because the +average is calculated over a smaller number of frames; on the other hand, enabling this setting will ensure that +the stream will meet tight bandwidth constraints. Applicable to encoders. + + + + + V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE  + enum v4l2_mpeg_mfc51_force_frame_type + + Force a frame type for the next queued buffer. Applicable to encoders. +Possible values are: + + + + + + V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED  + Forcing a specific frame type disabled. + + + V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME  + Force an I-frame. + + + V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED  + Force a non-coded frame. + + + + + +
+
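For the thumbnail use case mentioned above, a sketch of enabling a zero display delay on the H264 decoder (the MFC 5.1 controls live in the MPEG control class; remember that returned buffers may still be referenced by the hardware and must not be written to):

struct v4l2_ext_control ctrl[2];
struct v4l2_ext_controls parm;

memset (ctrl, 0, sizeof (ctrl));
ctrl[0].id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE;
ctrl[0].value = 1;
ctrl[1].id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY;
ctrl[1].value = 0; /* return decoded frames as early as possible */

memset (&parm, 0, sizeof (parm));
parm.ctrl_class = V4L2_CTRL_CLASS_MPEG;
parm.count = 2;
parm.controls = ctrl;

if (-1 == ioctl (fd, VIDIOC_S_EXT_CTRLS, &parm))
        perror ("VIDIOC_S_EXT_CTRLS");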
+ +
+ CX2341x MPEG Controls + + The following MPEG class controls deal with MPEG +encoding settings that are specific to the Conexant CX23415 and +CX23416 MPEG encoding chips. + + + CX2341x Control IDs + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE  + enum v4l2_mpeg_cx2341x_video_spatial_filter_mode + Sets the Spatial +Filter mode (default MANUAL). Possible values +are: + + + + + + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL  + Choose the filter manually + + + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO  + Choose the filter automatically + + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER  + integer (0-15) + The setting for the +Spatial Filter. 0 = off, 15 = maximum. (Default is 0.) + + + + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE  + enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type + Select the algorithm +to use for the Luma Spatial Filter (default +1D_HOR). Possible values: + + + + + + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF  + No filter + + + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR  + One-dimensional horizontal + + + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT  + One-dimensional vertical + + + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE  + Two-dimensional separable + + + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE  + Two-dimensional symmetrical +non-separable + + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE  + enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type + Select the algorithm +for the Chroma Spatial Filter (default 1D_HOR). +Possible values are: + + + + + + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF  + No filter + + + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR  + One-dimensional horizontal + + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE  + enum v4l2_mpeg_cx2341x_video_temporal_filter_mode + Sets the Temporal +Filter mode (default MANUAL). Possible values +are: + + + + + + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL  + Choose the filter manually + + + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO  + Choose the filter automatically + + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER  + integer (0-31) + The setting for the +Temporal Filter. 0 = off, 31 = maximum. (Default is 8 for full-scale +capturing and 0 for scaled capturing.) + + + + V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE  + enum v4l2_mpeg_cx2341x_video_median_filter_type + Median Filter Type +(default OFF). 
Possible values are: + + + + + + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF  + No filter + + + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR  + Horizontal filter + + + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT  + Vertical filter + + + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT  + Horizontal and vertical filter + + + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG  + Diagonal filter + + + + + + + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM  + integer (0-255) + Threshold above which +the luminance median filter is enabled (default 0) + + + + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP  + integer (0-255) + Threshold below which +the luminance median filter is enabled (default 255) + + + + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM  + integer (0-255) + Threshold above which +the chroma median filter is enabled (default 0) + + + + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP  + integer (0-255) + Threshold below which +the chroma median filter is enabled (default 255) + + + + V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS  + boolean + + The CX2341X MPEG encoder +can insert one empty MPEG-2 PES packet into the stream between every +four video frames. The packet size is 2048 bytes, including the +packet_start_code_prefix and stream_id fields. The stream_id is 0xBF +(private stream 2). The payload consists of 0x00 bytes, to be filled +in by the application. 0 = do not insert, 1 = insert packets. + + + +
+
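Since these controls exist only on Conexant-based hardware, an application should probe for them before exposing them in its user interface; a short sketch using the temporal filter as an example:

struct v4l2_queryctrl qctrl;

memset (&qctrl, 0, sizeof (qctrl));
qctrl.id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER;

if (-1 == ioctl (fd, VIDIOC_QUERYCTRL, &qctrl)) {
        /* EINVAL simply means the driver does not provide this control. */
        if (errno == EINVAL)
                printf ("CX2341x temporal filter not available\n");
} else {
        printf ("Temporal filter: range %d-%d, default %d\n",
                qctrl.minimum, qctrl.maximum, qctrl.default_value);
}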
+
+ +
+ Camera Control Reference + + The Camera class includes controls for mechanical (or +equivalent digital) features of a device such as controllable lenses +or sensors. + + + Camera Control IDs + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_CAMERA_CLASS  + class + The Camera class +descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a +description of this control class. + + + + + V4L2_CID_EXPOSURE_AUTO  + enum v4l2_exposure_auto_type + Enables automatic +adjustments of the exposure time and/or iris aperture. The effect of +manual changes of the exposure time or iris aperture while these +features are enabled is undefined, drivers should ignore such +requests. Possible values are: + + + + + + V4L2_EXPOSURE_AUTO  + Automatic exposure time, automatic iris +aperture. + + + V4L2_EXPOSURE_MANUAL  + Manual exposure time, manual iris. + + + V4L2_EXPOSURE_SHUTTER_PRIORITY  + Manual exposure time, auto iris. + + + V4L2_EXPOSURE_APERTURE_PRIORITY  + Auto exposure time, manual iris. + + + + + + + + V4L2_CID_EXPOSURE_ABSOLUTE  + integer + Determines the exposure +time of the camera sensor. The exposure time is limited by the frame +interval. Drivers should interpret the values as 100 µs units, +where the value 1 stands for 1/10000th of a second, 10000 for 1 second +and 100000 for 10 seconds. + + + + + V4L2_CID_EXPOSURE_AUTO_PRIORITY  + boolean + When +V4L2_CID_EXPOSURE_AUTO is set to +AUTO or APERTURE_PRIORITY, +this control determines if the device may dynamically vary the frame +rate. By default this feature is disabled (0) and the frame rate must +remain constant. + + + + + V4L2_CID_PAN_RELATIVE  + integer + This control turns the +camera horizontally by the specified amount. The unit is undefined. A +positive value moves the camera to the right (clockwise when viewed +from above), a negative value to the left. A value of zero does not +cause motion. This is a write-only control. + + + + + V4L2_CID_TILT_RELATIVE  + integer + This control turns the +camera vertically by the specified amount. The unit is undefined. A +positive value moves the camera up, a negative value down. A value of +zero does not cause motion. This is a write-only control. + + + + + V4L2_CID_PAN_RESET  + button + When this control is set, +the camera moves horizontally to the default position. + + + + + V4L2_CID_TILT_RESET  + button + When this control is set, +the camera moves vertically to the default position. + + + + + V4L2_CID_PAN_ABSOLUTE  + integer + This control +turns the camera horizontally to the specified position. Positive +values move the camera to the right (clockwise when viewed from above), +negative values to the left. Drivers should interpret the values as arc +seconds, with valid values between -180 * 3600 and +180 * 3600 +inclusive. + + + + + V4L2_CID_TILT_ABSOLUTE  + integer + This control +turns the camera vertically to the specified position. Positive values +move the camera up, negative values down. Drivers should interpret the +values as arc seconds, with valid values between -180 * 3600 and +180 +* 3600 inclusive. + + + + + V4L2_CID_FOCUS_ABSOLUTE  + integer + This control sets the +focal point of the camera to the specified position. The unit is +undefined. Positive values set the focus closer to the camera, +negative values towards infinity. + + + + + V4L2_CID_FOCUS_RELATIVE  + integer + This control moves the +focal point of the camera by the specified amount. The unit is +undefined. 
Positive values move the focus closer to the camera, +negative values towards infinity. This is a write-only control. + + + + + V4L2_CID_FOCUS_AUTO  + boolean + Enables automatic focus +adjustments. The effect of manual focus adjustments while this feature +is enabled is undefined, drivers should ignore such requests. + + + + + V4L2_CID_ZOOM_ABSOLUTE  + integer + Specify the objective lens +focal length as an absolute value. The zoom unit is driver-specific and its +value should be a positive integer. + + + + + V4L2_CID_ZOOM_RELATIVE  + integer + Specify the objective lens +focal length relatively to the current value. Positive values move the zoom +lens group towards the telephoto direction, negative values towards the +wide-angle direction. The zoom unit is driver-specific. This is a write-only control. + + + + + V4L2_CID_ZOOM_CONTINUOUS  + integer + Move the objective lens group +at the specified speed until it reaches physical device limits or until an +explicit request to stop the movement. A positive value moves the zoom lens +group towards the telephoto direction. A value of zero stops the zoom lens +group movement. A negative value moves the zoom lens group towards the +wide-angle direction. The zoom speed unit is driver-specific. + + + + + V4L2_CID_IRIS_ABSOLUTE  + integer + This control sets the +camera's aperture to the specified value. The unit is undefined. +Larger values open the iris wider, smaller values close it. + + + + + V4L2_CID_IRIS_RELATIVE  + integer + This control modifies the +camera's aperture by the specified amount. The unit is undefined. +Positive values open the iris one step further, negative values close +it one step further. This is a write-only control. + + + + + V4L2_CID_PRIVACY  + boolean + Prevent video from being acquired +by the camera. When this control is set to TRUE (1), no +image can be captured by the camera. Common means to enforce privacy are +mechanical obturation of the sensor and firmware image processing, but the +device is not restricted to these methods. Devices that implement the privacy +control must support read access and may support write access. + + + + V4L2_CID_BAND_STOP_FILTER  + integer + Switch the band-stop filter of a +camera sensor on or off, or specify its strength. Such band-stop filters can +be used, for example, to filter out the fluorescent light component. + + + + +
+
+ +
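As an illustration of how the camera class controls listed above are exercised from user space, a minimal C sketch (not part of the patch) might look like this; the /dev/video0 node, the chosen values and the assumption that the driver implements these controls are all hypothetical, and error handling is kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_ext_control ctrl[2];
	struct v4l2_ext_controls ctrls;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(ctrl, 0, sizeof(ctrl));
	ctrl[0].id = V4L2_CID_EXPOSURE_AUTO;
	ctrl[0].value = V4L2_EXPOSURE_MANUAL;	/* manual exposure time, manual iris */
	ctrl[1].id = V4L2_CID_EXPOSURE_ABSOLUTE;
	ctrl[1].value = 100;			/* 100 * 100 us = 10 ms exposure */

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
	ctrls.count = 2;
	ctrls.controls = ctrl;

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
		perror("VIDIOC_S_EXT_CTRLS");

	close(fd);
	return 0;
}

A real application would first query the bounds of each control with VIDIOC_QUERYCTRL instead of hard-coding values.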
+ FM Transmitter Control Reference + + The FM Transmitter (FM_TX) class includes controls for common features of +FM transmissions capable devices. Currently this class includes parameters for audio +compression, pilot tone generation, audio deviation limiter, RDS transmission and +tuning power features. + + + FM_TX Control IDs + + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_FM_TX_CLASS  + class + The FM_TX class +descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a +description of this control class. + + + V4L2_CID_RDS_TX_DEVIATION  + integer + + Configures RDS signal frequency deviation level in Hz. +The range and step are driver-specific. + + + V4L2_CID_RDS_TX_PI  + integer + + Sets the RDS Programme Identification field +for transmission. + + + V4L2_CID_RDS_TX_PTY  + integer + + Sets the RDS Programme Type field for transmission. +This encodes up to 31 pre-defined programme types. + + + V4L2_CID_RDS_TX_PS_NAME  + string + + Sets the Programme Service name (PS_NAME) for transmission. +It is intended for static display on a receiver. It is the primary aid to listeners in programme service +identification and selection. In Annex E of , the RDS specification, +there is a full description of the correct character encoding for Programme Service name strings. +Also from RDS specification, PS is usually a single eight character text. However, it is also possible +to find receivers which can scroll strings sized as 8 x N characters. So, this control must be configured +with steps of 8 characters. The result is it must always contain a string with size multiple of 8. + + + V4L2_CID_RDS_TX_RADIO_TEXT  + string + + Sets the Radio Text info for transmission. It is a textual description of +what is being broadcasted. RDS Radio Text can be applied when broadcaster wishes to transmit longer PS names, +programme-related information or any other text. In these cases, RadioText should be used in addition to +V4L2_CID_RDS_TX_PS_NAME. The encoding for Radio Text strings is also fully described +in Annex E of . The length of Radio Text strings depends on which RDS Block is being +used to transmit it, either 32 (2A block) or 64 (2B block). However, it is also possible +to find receivers which can scroll strings sized as 32 x N or 64 x N characters. So, this control must be configured +with steps of 32 or 64 characters. The result is it must always contain a string with size multiple of 32 or 64. + + + V4L2_CID_AUDIO_LIMITER_ENABLED  + boolean + + Enables or disables the audio deviation limiter feature. +The limiter is useful when trying to maximize the audio volume, minimize receiver-generated +distortion and prevent overmodulation. + + + + V4L2_CID_AUDIO_LIMITER_RELEASE_TIME  + integer + + Sets the audio deviation limiter feature release time. +Unit is in useconds. Step and range are driver-specific. + + + V4L2_CID_AUDIO_LIMITER_DEVIATION  + integer + + Configures audio frequency deviation level in Hz. +The range and step are driver-specific. + + + V4L2_CID_AUDIO_COMPRESSION_ENABLED  + boolean + + Enables or disables the audio compression feature. +This feature amplifies signals below the threshold by a fixed gain and compresses audio +signals above the threshold by the ratio of Threshold/(Gain + Threshold). + + + V4L2_CID_AUDIO_COMPRESSION_GAIN  + integer + + Sets the gain for audio compression feature. It is +a dB value. The range and step are driver-specific. 
+ + + V4L2_CID_AUDIO_COMPRESSION_THRESHOLD  + integer + + Sets the threshold level for audio compression feature. +It is a dB value. The range and step are driver-specific. + + + V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME  + integer + + Sets the attack time for audio compression feature. +It is a useconds value. The range and step are driver-specific. + + + V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME  + integer + + Sets the release time for audio compression feature. +It is a useconds value. The range and step are driver-specific. + + + V4L2_CID_PILOT_TONE_ENABLED  + boolean + + Enables or disables the pilot tone generation feature. + + + V4L2_CID_PILOT_TONE_DEVIATION  + integer + + Configures pilot tone frequency deviation level. Unit is +in Hz. The range and step are driver-specific. + + + V4L2_CID_PILOT_TONE_FREQUENCY  + integer + + Configures pilot tone frequency value. Unit is +in Hz. The range and step are driver-specific. + + + V4L2_CID_TUNE_PREEMPHASIS  + integer + + Configures the pre-emphasis value for broadcasting. +A pre-emphasis filter is applied to the broadcast to accentuate the high audio frequencies. +Depending on the region, a time constant of either 50 or 75 useconds is used. The enum v4l2_preemphasis +defines possible values for pre-emphasis. Here they are: + + + + + V4L2_PREEMPHASIS_DISABLED  + No pre-emphasis is applied. + + + V4L2_PREEMPHASIS_50_uS  + A pre-emphasis of 50 uS is used. + + + V4L2_PREEMPHASIS_75_uS  + A pre-emphasis of 75 uS is used. + + + + + + + V4L2_CID_TUNE_POWER_LEVEL  + integer + + Sets the output power level for signal transmission. +Unit is in dBuV. Range and step are driver-specific. + + + V4L2_CID_TUNE_ANTENNA_CAPACITOR  + integer + + This selects the value of the antenna tuning capacitor +manually, or automatically if set to zero. Unit, range and step are driver-specific. + + + +
+ +For more details about RDS specification, refer to + document, from CENELEC. +
+ +
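As an illustration of the FM_TX string controls above, a minimal C sketch (not part of the patch) that programs an 8-character RDS Programme Service name might look as follows; the /dev/radio0 node and the PS text are assumptions, and real code should honour the step and maximum length reported by the driver:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	char ps_name[] = "RADIO  1";		/* exactly 8 characters, per the step rule above */
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;
	int fd = open("/dev/radio0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_RDS_TX_PS_NAME;
	ctrl.size = sizeof(ps_name);		/* payload size including the terminating NUL */
	ctrl.string = ps_name;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
		perror("VIDIOC_S_EXT_CTRLS");

	close(fd);
	return 0;
}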
+ Flash Control Reference + + + Experimental + + This is an experimental +interface and may change in the future. + + + + The V4L2 flash controls are intended to provide generic access + to flash controller devices. Flash controller devices are + typically used in digital cameras. + + + + The interface can support both LED and xenon flash devices. As + of writing this, there is no xenon flash driver using this + interface. + + +
+ Supported use cases + +
+ Unsynchronised LED flash (software strobe) + + + Unsynchronised LED flash is controlled directly by the + host rather than by the sensor. The flash must be enabled by the host + before the exposure of the image starts and disabled once + it ends. The host is fully responsible for the timing of + the flash. + + + Example of such a device: Nokia N900.
+ +
+ Synchronised LED flash (hardware strobe) + + + The synchronised LED flash is pre-programmed by the host + (power and timeout) but controlled by the sensor through a + strobe signal from the sensor to the flash. + + + + The sensor controls the flash duration and timing. This + information typically must be made available to the + sensor. + + +
+ +
+ LED flash as torch + + + LED flash may be used as a torch, either in conjunction with another + use case involving the camera or on its own. + + +
+ +
+ + + Flash Control IDs + + + + + + + + + + + ID + Type + Description + + + + + + V4L2_CID_FLASH_CLASS + class + + + The FLASH class descriptor. + + + V4L2_CID_FLASH_LED_MODE + menu + + + Defines the mode of the flash LED, + the high-power white LED attached to the flash controller. + Setting this control may not be possible in presence of + some faults. See V4L2_CID_FLASH_FAULT. + + + + + + V4L2_FLASH_LED_MODE_NONE + Off. + + + V4L2_FLASH_LED_MODE_FLASH + Flash mode. + + + V4L2_FLASH_LED_MODE_TORCH + Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY. + + + + + + V4L2_CID_FLASH_STROBE_SOURCE + menu + + Defines the source of the flash LED + strobe. + + + + + + V4L2_FLASH_STROBE_SOURCE_SOFTWARE + The flash strobe is triggered by using + the V4L2_CID_FLASH_STROBE control. + + + V4L2_FLASH_STROBE_SOURCE_EXTERNAL + The flash strobe is triggered by an + external source. Typically this is a sensor, + which makes it possible to synchronises the + flash strobe start to exposure start. + + + + + + V4L2_CID_FLASH_STROBE + button + + + Strobe flash. Valid when + V4L2_CID_FLASH_LED_MODE is set to + V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE + is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this + control may not be possible in presence of some faults. + See V4L2_CID_FLASH_FAULT. + + + V4L2_CID_FLASH_STROBE_STOP + button + + Stop flash strobe immediately. + + + V4L2_CID_FLASH_STROBE_STATUS + boolean + + + Strobe status: whether the flash + is strobing at the moment or not. This is a read-only + control. + + + V4L2_CID_FLASH_TIMEOUT + integer + + + Hardware timeout for flash. The + flash strobe is stopped after this period of time has + passed from the start of the strobe. + + + V4L2_CID_FLASH_INTENSITY + integer + + + Intensity of the flash strobe when + the flash LED is in flash mode + (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps + (mA) if possible. + + + V4L2_CID_FLASH_TORCH_INTENSITY + integer + + + Intensity of the flash LED in + torch mode (V4L2_FLASH_LED_MODE_TORCH). The unit should be + milliamps (mA) if possible. Setting this control may not + be possible in presence of some faults. See + V4L2_CID_FLASH_FAULT. + + + V4L2_CID_FLASH_INDICATOR_INTENSITY + integer + + + Intensity of the indicator LED. + The indicator LED may be fully independent of the flash + LED. The unit should be microamps (uA) if possible. + + + V4L2_CID_FLASH_FAULT + bitmask + + + Faults related to the flash. The + faults tell about specific problems in the flash chip + itself or the LEDs attached to it. Faults may prevent + further use of some of the flash controls. In particular, + V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE + if the fault affects the flash LED. Exactly which faults + have such an effect is chip dependent. Reading the faults + resets the control and returns the chip to a usable state + if possible. + + + + + + V4L2_FLASH_FAULT_OVER_VOLTAGE + Flash controller voltage to the flash LED + has exceeded the limit specific to the flash + controller. + + + V4L2_FLASH_FAULT_TIMEOUT + The flash strobe was still on when + the timeout set by the user --- + V4L2_CID_FLASH_TIMEOUT control --- has expired. + Not all flash controllers may set this in all + such conditions. + + + V4L2_FLASH_FAULT_OVER_TEMPERATURE + The flash controller has overheated. + + + V4L2_FLASH_FAULT_SHORT_CIRCUIT + The short circuit protection of the flash + controller has been triggered. + + + + + + V4L2_CID_FLASH_CHARGE + boolean + + Enable or disable charging of the xenon + flash capacitor. 
+ + + V4L2_CID_FLASH_READY + boolean + + + Is the flash ready to strobe? + Xenon flashes require their capacitors charged before + strobing. LED flashes often require a cooldown period + after strobe during which another strobe will not be + possible. This is a read-only control. + + + + +
+ +
+
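As an illustration of the flash controls documented above, a minimal C sketch (not part of the patch) that selects flash mode with a software strobe source and then strobes once might look as follows; the device node is an assumption (on many boards the flash controller is exposed as a V4L2 sub-device node rather than a video node), and the V4L2_CID_FLASH_* identifiers are the ones introduced by this documentation:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int set_ctrl(int fd, unsigned int id, int value)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = id;
	ctrl.value = value;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(id);
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	/* Select flash mode with a software strobe source, then strobe once. */
	if (set_ctrl(fd, V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_FLASH) < 0 ||
	    set_ctrl(fd, V4L2_CID_FLASH_STROBE_SOURCE,
		     V4L2_FLASH_STROBE_SOURCE_SOFTWARE) < 0 ||
	    set_ctrl(fd, V4L2_CID_FLASH_STROBE, 1) < 0)	/* button: value is ignored */
		perror("VIDIOC_S_EXT_CTRLS");

	close(fd);
	return 0;
}

Before strobing, a careful application would also check V4L2_CID_FLASH_READY and the fault bits reported by V4L2_CID_FLASH_FAULT.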
+ + diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml index deb660207f9..8af0a42e84a 100644 --- a/Documentation/DocBook/v4l/pixfmt.xml +++ b/Documentation/DocBook/v4l/pixfmt.xml @@ -741,10 +741,55 @@ information. V4L2_PIX_FMT_MPEG 'MPEG' - MPEG stream. The actual format is determined by + MPEG multiplexed stream. The actual format is determined by extended control V4L2_CID_MPEG_STREAM_TYPE, see . + + V4L2_PIX_FMT_H264 + 'H264' + H264 video elementary stream with start codes. + + + V4L2_PIX_FMT_H264_NO_SC + 'AVC1' + H264 video elementary stream without start codes. + + + V4L2_PIX_FMT_H263 + 'H263' + H263 video elementary stream. + + + V4L2_PIX_FMT_MPEG1 + 'MPG1' + MPEG1 video elementary stream. + + + V4L2_PIX_FMT_MPEG2 + 'MPG2' + MPEG2 video elementary stream. + + + V4L2_PIX_FMT_MPEG4 + 'MPG4' + MPEG4 video elementary stream. + + + V4L2_PIX_FMT_XVID + 'XVID' + Xvid video elementary stream. + + + V4L2_PIX_FMT_VC1_ANNEX_G + 'VC1G' + VC1, SMPTE 421M Annex G compliant stream. + + + V4L2_PIX_FMT_VC1_ANNEX_L + 'VC1L' + VC1, SMPTE 421M Annex L compliant stream. + diff --git a/Documentation/HOWTO b/Documentation/HOWTO index 81bc1a9ab9d..59c080f084e 100644 --- a/Documentation/HOWTO +++ b/Documentation/HOWTO @@ -218,16 +218,16 @@ The development process Linux kernel development process currently consists of a few different main kernel "branches" and lots of different subsystem-specific kernel branches. These different branches are: - - main 2.6.x kernel tree - - 2.6.x.y -stable kernel tree - - 2.6.x -git kernel patches + - main 3.x kernel tree + - 3.x.y -stable kernel tree + - 3.x -git kernel patches - subsystem specific kernel trees and patches - - the 2.6.x -next kernel tree for integration tests + - the 3.x -next kernel tree for integration tests -2.6.x kernel tree +3.x kernel tree ----------------- -2.6.x kernels are maintained by Linus Torvalds, and can be found on -kernel.org in the pub/linux/kernel/v2.6/ directory. Its development +3.x kernels are maintained by Linus Torvalds, and can be found on +kernel.org in the pub/linux/kernel/v3.x/ directory. Its development process is as follows: - As soon as a new kernel is released a two weeks window is open, during this period of time maintainers can submit big diffs to @@ -262,21 +262,21 @@ mailing list about kernel releases: released according to perceived bug status, not according to a preconceived timeline." -2.6.x.y -stable kernel tree +3.x.y -stable kernel tree --------------------------- -Kernels with 4-part versions are -stable kernels. They contain +Kernels with 3-part versions are -stable kernels. They contain relatively small and critical fixes for security problems or significant -regressions discovered in a given 2.6.x kernel. +regressions discovered in a given 3.x kernel. This is the recommended branch for users who want the most recent stable kernel and are not interested in helping test development/experimental versions. -If no 2.6.x.y kernel is available, then the highest numbered 2.6.x +If no 3.x.y kernel is available, then the highest numbered 3.x kernel is the current stable kernel. -2.6.x.y are maintained by the "stable" team , and are -released as needs dictate. The normal release period is approximately +3.x.y are maintained by the "stable" team , and +are released as needs dictate. The normal release period is approximately two weeks, but it can be longer if there are no pressing problems. A security-related problem, instead, can cause a release to happen almost instantly. 
@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree documents what kinds of changes are acceptable for the -stable tree, and how the release process works. -2.6.x -git patches +3.x -git patches ------------------ These are daily snapshots of Linus' kernel tree which are managed in a git repository (hence the name.) These patches are usually released @@ -317,13 +317,13 @@ revisions to it, and maintainers can mark patches as under review, accepted, or rejected. Most of these patchwork sites are listed at http://patchwork.kernel.org/. -2.6.x -next kernel tree for integration tests +3.x -next kernel tree for integration tests --------------------------------------------- -Before updates from subsystem trees are merged into the mainline 2.6.x +Before updates from subsystem trees are merged into the mainline 3.x tree, they need to be integration-tested. For this purpose, a special testing repository exists into which virtually all subsystem trees are pulled on an almost daily basis: - http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git + http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git http://linux.f-seidel.de/linux-next/pmwiki/ This way, the -next kernel gives a summary outlook onto what will be diff --git a/Documentation/block/row-iosched.txt b/Documentation/block/row-iosched.txt new file mode 100644 index 00000000000..987bd883444 --- /dev/null +++ b/Documentation/block/row-iosched.txt @@ -0,0 +1,117 @@ +Introduction +============ + +The ROW scheduling algorithm will be used in mobile devices as the default +block layer IO scheduling algorithm. ROW stands for "READ Over WRITE", +which is the main request dispatch policy of this algorithm. + +The ROW IO scheduler was developed with mobile devices' needs in +mind. In mobile devices we favor user experience above everything else, +thus we want to give READ IO requests as much priority as possible. +The main idea of the ROW scheduling policy is: +If there are READ requests in the pipe, dispatch them, but don't starve +the WRITE requests too much. + +Software description +==================== +The requests are kept in queues according to their priority. The +dispatching of requests is done in a Round Robin manner with a +different slice for each queue. The dispatch quantum for a specific +queue is defined according to the queue's priority. READ queues are +given a bigger dispatch quantum than the WRITE queues, within a dispatch +cycle. + +At the moment there are 6 types of queues the requests are +distributed to: +- High priority READ queue +- High priority Synchronous WRITE queue +- Regular priority READ queue +- Regular priority Synchronous WRITE queue +- Regular priority WRITE queue +- Low priority READ queue + +If in a certain dispatch cycle one of the queues was empty and didn't +use its quantum, that queue will be marked as "un-served". If we're in the +middle of a dispatch cycle dispatching from queue Y and a request +arrives for queue X that was un-served in the previous cycle, and X's +priority is higher than Y's, queue Y will be preempted in favor of +queue X. This doesn't mean that the cycle is restarted. The "dispatched" +counter of queue Y will remain unchanged. Once queue X uses up its quantum +(or there are no more requests left on it) we'll switch back to queue Y +and allow it to finish its quantum. + +For READ request queues we allow idling within a dispatch quantum in +order to give the application a chance to insert more requests.
Idling +means adding some extra time for serving a certain queue even if the +queue is empty. Idling is enabled if we identify that the application is +inserting requests at a high frequency. + +For idling on READ queues we use a timer mechanism. When the timer expires, +if there are requests in the scheduler we will signal the underlying driver +(for example the MMC driver) to fetch another request for dispatch. + +The ROW algorithm takes the scheduling policy one step further, making +it a bit more "user-needs oriented", by allowing the application to +hint at the urgency of its requests. For example: even among the READ +requests, several requests may be more urgent for completion than others. +The more urgent ones will go to the High priority READ queue, which is given a +bigger dispatch quantum than any other queue. + +The ROW scheduler will support special services for block devices that +support High Priority Requests. That is, the scheduler may inform the +device of urgent requests using the new callback make_urgent_request. +In addition it will support rescheduling of requests that were +interrupted. For example, if the device issues a long write request and +a sudden high priority read interrupt pops in, the scheduler will +inform the device about the urgent request, so the device can stop the +current write request and serve the high priority read request. In such +a case the device may also send back to the scheduler the remainder of +the interrupted write request, such that the scheduler may continue +sending high priority requests without the need to interrupt the +ongoing write again and again. The write remainder will be sent later on +according to the scheduler policy. + +Design +====== +Existing algorithms (cfq, deadline) sort the io requests according to LBA. +When deciding on the next request to dispatch they choose the closest +request to the current disk head position (from handling the last +dispatched request). This is done in order to reduce the disk head +movement to a minimum. +We feel that this functionality isn't really needed in mobile devices. +Usually applications that write/read large chunks of data insert the +requests in already sorted LBA order. Thus dealing with sort trees adds +unnecessary complexity. + +We're planning to try this enhancement in the future to check whether the +performance is influenced by it. + +SMP/multi-core +============== +At the moment the code is accessed from 2 contexts: +- Application context (from block/elevator layer): adding the requests. +- Underlying driver context (for example the mmc driver thread): dispatching + the requests and notifying on completion. + +One lock is used to synchronize between the two. This lock is provided +by the underlying driver along with the dispatch queue. + +Config options +============== +1. hp_read_quantum: dispatch quantum for the high priority READ queue +2. rp_read_quantum: dispatch quantum for the regular priority READ queue +3. hp_swrite_quantum: dispatch quantum for the high priority Synchronous + WRITE queue +4. rp_swrite_quantum: dispatch quantum for the regular priority + Synchronous WRITE queue +5. rp_write_quantum: dispatch quantum for the regular priority WRITE + queue +6. lp_read_quantum: dispatch quantum for the low priority READ queue +7. lp_swrite_quantum: dispatch quantum for the low priority Synchronous + WRITE queue +8. read_idle: how long to idle on the read queue in Msec (in case idling + is enabled on that queue). +9. read_idle_freq: frequency of inserting READ requests that will + trigger idling.
This is the time in Msec between inserting two READ + requests + diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting index 903a2546f13..8a48c9b6286 100644 --- a/Documentation/development-process/5.Posting +++ b/Documentation/development-process/5.Posting @@ -271,10 +271,10 @@ copies should go to: the linux-kernel list. - If you are fixing a bug, think about whether the fix should go into the - next stable update. If so, stable@kernel.org should get a copy of the - patch. Also add a "Cc: stable@kernel.org" to the tags within the patch - itself; that will cause the stable team to get a notification when your - fix goes into the mainline. + next stable update. If so, stable@vger.kernel.org should get a copy of + the patch. Also add a "Cc: stable@vger.kernel.org" to the tags within + the patch itself; that will cause the stable team to get a notification + when your fix goes into the mainline. When selecting recipients for a patch, it is good to have an idea of who you think will eventually accept the patch and get it merged. While it diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware index 3348d313fbe..511dd4df04c 100644 --- a/Documentation/dvb/get_dvb_firmware +++ b/Documentation/dvb/get_dvb_firmware @@ -114,7 +114,7 @@ sub tda10045 { sub tda10046 { my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip"; - my $url = "http://www.tt-download.com/download/updates/219/$sourcefile"; + my $url = "http://technotrend.com.ua/download/software/219/$sourcefile"; my $hash = "6a7e1e2f2644b162ff0502367553c72d"; my $outfile = "dvb-fe-tda10046.fw"; my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index b1c921c2751..6ff5af4499c 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -6,14 +6,6 @@ be removed from this file. --------------------------- -What: x86 floppy disable_hlt -When: 2012 -Why: ancient workaround of dubious utility clutters the - code used by everybody else. 
-Who: Len Brown - ---------------------------- - What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle When: 2012 Why: This optional sub-feature of APM is of dubious reliability, diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42 index a22ecf48f25..52729a756c1 100644 --- a/Documentation/hwmon/jc42 +++ b/Documentation/hwmon/jc42 @@ -7,21 +7,29 @@ Supported chips: Addresses scanned: I2C 0x18 - 0x1f Datasheets: http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf - * IDT TSE2002B3, TS3000B3 - Prefix: 'tse2002b3', 'ts3000b3' + * Atmel AT30TS00 + Prefix: 'at30ts00' Addresses scanned: I2C 0x18 - 0x1f Datasheets: - http://www.idt.com/products/getdoc.cfm?docid=18715691 - http://www.idt.com/products/getdoc.cfm?docid=18715692 + http://www.atmel.com/Images/doc8585.pdf + * IDT TSE2002B3, TSE2002GB2, TS3000B3, TS3000GB2 + Prefix: 'tse2002', 'ts3000' + Addresses scanned: I2C 0x18 - 0x1f + Datasheets: + http://www.idt.com/sites/default/files/documents/IDT_TSE2002B3C_DST_20100512_120303152056.pdf + http://www.idt.com/sites/default/files/documents/IDT_TSE2002GB2A1_DST_20111107_120303145914.pdf + http://www.idt.com/sites/default/files/documents/IDT_TS3000B3A_DST_20101129_120303152013.pdf + http://www.idt.com/sites/default/files/documents/IDT_TS3000GB2A1_DST_20111104_120303151012.pdf * Maxim MAX6604 Prefix: 'max6604' Addresses scanned: I2C 0x18 - 0x1f Datasheets: http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf - * Microchip MCP9805, MCP98242, MCP98243, MCP9843 - Prefixes: 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843' + * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843 + Prefixes: 'mcp9804', 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843' Addresses scanned: I2C 0x18 - 0x1f Datasheets: + http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf @@ -48,6 +56,12 @@ Supported chips: Datasheets: http://www.st.com/stonline/products/literature/ds/13447/stts424.pdf http://www.st.com/stonline/products/literature/ds/13448/stts424e02.pdf + * ST Microelectronics STTS2002, STTS3000 + Prefix: 'stts2002', 'stts3000' + Addresses scanned: I2C 0x18 - 0x1f + Datasheets: + http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATASHEET/CD00225278.pdf + http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATA_BRIEF/CD00270920.pdf * JEDEC JC 42.4 compliant temperature sensor chips Prefix: 'jc42' Addresses scanned: I2C 0x18 - 0x1f diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index aa47be71df4..389923426ca 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -531,6 +531,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. UART at the specified I/O port or MMIO address, switching to the matching ttyS device later. The options are the same as for ttyS, above. + hvc Use the hypervisor console device . This is for + both Xen and PowerPC hypervisors. If the device connected to the port is not a TTY but a braille device, prepend "brl," before the device type, for instance @@ -679,6 +681,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. earlyprintk= [X86,SH,BLACKFIN] earlyprintk=vga + earlyprintk=xen earlyprintk=serial[,ttySn[,baudrate]] earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] @@ -696,6 +699,8 @@ bytes respectively. 
Such letter suffixes can also be entirely omitted. The VGA output is eventually overwritten by the real console. + The xen output can only be used by Xen PV guests. + ekgdboc= [X86,KGDB] Allow early kernel console debugging ekgdboc=kbd @@ -1764,6 +1769,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. noresidual [PPC] Don't use residual data on PReP machines. + nordrand [X86] Disable the direct use of the RDRAND + instruction even if it is supported by the + processor. RDRAND is still available to user + space applications. + noresume [SWSUSP] Disables resume and restores original swap space. diff --git a/Documentation/networking/ifenslave.c b/Documentation/networking/ifenslave.c index 2bac9618c34..50f1dc49f48 100644 --- a/Documentation/networking/ifenslave.c +++ b/Documentation/networking/ifenslave.c @@ -539,12 +539,14 @@ static int if_getconfig(char *ifname) metric = 0; } else metric = ifr.ifr_metric; + printf("The result of SIOCGIFMETRIC is %d\n", metric); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFMTU, &ifr) < 0) mtu = 0; else mtu = ifr.ifr_mtu; + printf("The result of SIOCGIFMTU is %d\n", mtu); strcpy(ifr.ifr_name, ifname); if (ioctl(skfd, SIOCGIFDSTADDR, &ifr) < 0) { diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index bfe924217f2..890fce9b368 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale), if it is <= 0. Possible values are [-31, 31], inclusive. - Default: 2 + Default: 1 tcp_allowed_congestion_control - STRING Show/set the congestion control choices available to non-privileged @@ -407,7 +407,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables automatic tuning of that socket's receive buffer size, in which case this value is ignored. - Default: between 87380B and 4MB, depending on RAM size. + Default: between 87380B and 6MB, depending on RAM size. tcp_sack - BOOLEAN Enable select acknowledgments (SACKS). @@ -534,6 +534,11 @@ tcp_thin_dupack - BOOLEAN Documentation/networking/tcp-thin.txt Default: 0 +tcp_challenge_ack_limit - INTEGER + Limits number of Challenge ACK sent per second, as recommended + in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) + Default: 100 + UDP variables: udp_mem - vector of 3 INTEGERs: min, pressure, max diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt index 21fd05c28e7..22bf11b846c 100644 --- a/Documentation/stable_kernel_rules.txt +++ b/Documentation/stable_kernel_rules.txt @@ -1,4 +1,4 @@ -Everything you ever wanted to know about Linux 2.6 -stable releases. +Everything you ever wanted to know about Linux -stable releases. Rules on what kind of patches are accepted, and which ones are not, into the "-stable" tree: @@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the marked CONFIG_BROKEN), an oops, a hang, data corruption, a real security issue, or some "oh, that's not good" issue. In short, something critical. + - Serious issues as reported by a user of a distribution kernel may also + be considered if they fix a notable performance or interactivity issue. 
+ As these fixes are not as obvious and have a higher risk of a subtle + regression they should only be submitted by a distribution kernel + maintainer and include an addendum linking to a bugzilla entry if it + exists and additional information on the user-visible impact. - New device IDs and quirks are also accepted. - No "theoretical race condition" issues, unless an explanation of how the race can be exploited is also provided. @@ -35,10 +41,10 @@ Procedure for submitting patches to the -stable tree: cherry-picked than this can be specified in the following format in the sign-off area: - Cc: # .32.x: a1f84a3: sched: Check for idle - Cc: # .32.x: 1b9508f: sched: Rate-limit newidle - Cc: # .32.x: fd21073: sched: Fix affinity logic - Cc: # .32.x + Cc: # 3.3.x: a1f84a3: sched: Check for idle + Cc: # 3.3.x: 1b9508f: sched: Rate-limit newidle + Cc: # 3.3.x: fd21073: sched: Fix affinity logic + Cc: # 3.3.x Signed-off-by: Ingo Molnar The tag sequence has the meaning of: @@ -72,6 +78,15 @@ Review cycle: security kernel team, and not go through the normal review cycle. Contact the kernel security team for more details on this procedure. +Trees: + + - The queues of patches, for both completed versions and in progress + versions can be found at: + http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git + - The finalized and tagged releases of all stable kernels can be found + in separate branches per version at: + http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git + Review committee: diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl index 12cecc83cd9..4a37c4759cd 100644 --- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl +++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl @@ -379,10 +379,10 @@ sub process_events { # To closer match vmstat scanning statistics, only count isolate_both # and isolate_inactive as scanning. isolate_active is rotation - # isolate_inactive == 0 - # isolate_active == 1 - # isolate_both == 2 - if ($isolate_mode != 1) { + # isolate_inactive == 1 + # isolate_active == 2 + # isolate_both == 3 + if ($isolate_mode != 2) { $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned; } $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty; diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt index a4efa0462f0..5335fa8b06e 100644 --- a/Documentation/usb/usbmon.txt +++ b/Documentation/usb/usbmon.txt @@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously. 2. Find which bus connects to the desired device -Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to -the device. Usually you do it by looking for the vendor string. If you have -many similar devices, unplug one and compare two /proc/bus/usb/devices outputs. -The T-line will have a bus number. Example: +Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds +to the device. Usually you do it by looking for the vendor string. If you have +many similar devices, unplug one and compare the two +/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number. +Example: T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 @@ -58,7 +59,10 @@ P: Vendor=0557 ProdID=2004 Rev= 1.00 S: Manufacturer=ATEN S: Product=UC100KM V2.00 -Bus=03 means it's bus 3. +"Bus=03" means it's bus 3. 
Alternatively, you can look at the output from +"lsusb" and get the bus number from the appropriate line. Example: + +Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00 3. Start 'cat' diff --git a/MAINTAINERS b/MAINTAINERS index 34e24186584..9b893d75d85 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2491,7 +2491,7 @@ S: Maintained F: drivers/net/eexpress.* ETHERNET BRIDGE -M: Stephen Hemminger +M: Stephen Hemminger L: bridge@lists.linux-foundation.org L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net:Bridge @@ -4327,7 +4327,7 @@ S: Supported F: drivers/infiniband/hw/nes/ NETEM NETWORK EMULATOR -M: Stephen Hemminger +M: Stephen Hemminger L: netem@lists.linux-foundation.org S: Maintained F: net/sched/sch_netem.c @@ -5247,7 +5247,7 @@ F: Documentation/blockdev/ramdisk.txt F: drivers/block/brd.c RANDOM NUMBER DRIVER -M: Matt Mackall +M: Theodore Ts'o" S: Maintained F: drivers/char/random.c @@ -5779,7 +5779,7 @@ S: Maintained F: drivers/usb/misc/sisusbvga/ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS -M: Stephen Hemminger +M: Stephen Hemminger L: netdev@vger.kernel.org S: Maintained F: drivers/net/skge.* @@ -6039,7 +6039,7 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman -L: stable@kernel.org +L: stable@vger.kernel.org S: Maintained STAGING SUBSYSTEM diff --git a/Makefile b/Makefile index 6d356e7dca4..082934a8d79 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 16 +SUBLEVEL = 81 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index e756d04b6cd..b15162fedcc 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@ */ -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index e8a761aee08..f939794363a 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " lda $31,3b-2b(%0)\n" " .previous\n" : "+r"(ret), "=&r"(prev), "=&r"(cmp) - : "r"(uaddr), "r"((long)oldval), "r"(newval) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "memory"); *uval = prev; diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index 06edfefc337..3eeb47c5018 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -69,9 +69,11 @@ #define SO_RXQ_OVFL 40 +#ifdef __KERNEL__ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ #define SOCK_NONBLOCK 0x40000000 +#endif /* __KERNEL__ */ #endif /* _ASM_SOCKET_H */ diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 99c0f46f6b9..dc616b34610 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -189,6 +189,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -210,6 +214,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 7f6fc98a0d9..679a96d5fa2 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1199,7 +1199,7 @@ config ARM_ERRATA_743622 depends on CPU_V7 help This option enables the workaround for the 743622 Cortex-A9 - (r2p0..r2p2) erratum. Under very rare conditions, a faulty + (r2p*) erratum. Under very rare conditions, a faulty optimisation in the Cortex-A9 Store Buffer may lead to data corruption. This workaround sets a specific bit in the diagnostic register of the Cortex-A9 which disables the Store Buffer @@ -1277,6 +1277,42 @@ config KSAPI Scorpion processor supported hardware performance counters on a per thread basis or AXI counters on an overall system basis. +config ARM_ERRATA_764369 + bool "ARM errata: Data cache line maintenance operation by MVA may not succeed" + depends on CPU_V7 && SMP + help + This option enables the workaround for erratum 764369 + affecting Cortex-A9 MPCore with two or more processors (all + current revisions). Under certain timing circumstances, a data + cache line maintenance operation by MVA targeting an Inner + Shareable memory region may fail to proceed up to either the + Point of Coherency or to the Point of Unification of the + system. This workaround adds a DSB instruction before the + relevant cache maintenance functions and sets a specific bit + in the diagnostic control register of the SCU. + +config PL310_ERRATA_769419 + bool "PL310 errata: no automatic Store Buffer drain" + depends on CACHE_L2X0 + help + On revisions of the PL310 prior to r3p2, the Store Buffer does + not automatically drain. This can cause normal, non-cacheable + writes to be retained when the memory system is idle, leading + to suboptimal I/O performance for drivers using coherent DMA. + This option adds a write barrier to the cpu_idle loop so that, + on systems with an outer cache, the store buffer is drained + explicitly. + +config ARM_ERRATA_775420 + bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" + depends on CPU_V7 + help + This option enables the workaround for the 775420 Cortex-A9 (r2p2, + r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance + operation aborts with MMU exception, it might cause the processor + to deadlock. This workaround puts DSB before executing ISB if + an abort may occur on cache maintenance. 
+ endmenu source "arch/arm/common/Kconfig" @@ -1341,32 +1377,6 @@ source "drivers/pci/Kconfig" source "drivers/pcmcia/Kconfig" -config ARM_ERRATA_764369 - bool "ARM errata: Data cache line maintenance operation by MVA may not succeed" - depends on CPU_V7 && SMP - help - This option enables the workaround for erratum 764369 - affecting Cortex-A9 MPCore with two or more processors (all - current revisions). Under certain timing circumstances, a data - cache line maintenance operation by MVA targeting an Inner - Shareable memory region may fail to proceed up to either the - Point of Coherency or to the Point of Unification of the - system. This workaround adds a DSB instruction before the - relevant cache maintenance functions and sets a specific bit - in the diagnostic control register of the SCU. - -config PL310_ERRATA_769419 - bool "PL310 errata: no automatic Store Buffer drain" - depends on CACHE_L2X0 - help - On revisions of the PL310 prior to r3p2, the Store Buffer does - not automatically drain. This can cause normal, non-cacheable - writes to be retained when the memory system is idle, leading - to suboptimal I/O performance for drivers using coherent DMA. - This option adds a write barrier to the cpu_idle loop so that, - on systems with an outer cache, the store buffer is drained - explicitly. - endmenu menu "Kernel Features" @@ -2012,6 +2022,7 @@ source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX tristate "CPUfreq driver for i.MX CPUs" depends on ARCH_MXC && CPU_FREQ + select CPU_FREQ_TABLE help This enables the CPUfreq driver for i.MX CPUs. diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 7f35d0c2912..e609e52a62d 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -654,6 +654,7 @@ __armv7_mmu_cache_on: mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer #ifdef CONFIG_MMU diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index 2bf224310fb..166d6aa97c4 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -29,7 +29,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_AUTO_ZRELADDR=y CONFIG_FPE_NWFPE=y CONFIG_NET=y diff --git a/arch/arm/configs/vigor_aosp_defconfig b/arch/arm/configs/vigor_aosp_defconfig index 524aaa3e9ba..1a683a649b3 100644 --- a/arch/arm/configs/vigor_aosp_defconfig +++ b/arch/arm/configs/vigor_aosp_defconfig @@ -27,6 +27,7 @@ CONFIG_VECTORS_BASE=0xffff0000 # CONFIG_ARM_PATCH_PHYS_VIRT is not set CONFIG_GENERIC_BUG=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_MSM_MULTIMEDIA_USE_ION=y CONFIG_HAVE_IRQ_WORK=y # @@ -44,7 +45,7 @@ CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_LZO is not set CONFIG_DEFAULT_HOSTNAME="(none)" -# CONFIG_SWAP is not set +CONFIG_SWAP=y # CONFIG_SYSVIPC is not set # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set @@ -166,11 +167,13 @@ CONFIG_LBDAF=y # CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_ROW=y CONFIG_IOSCHED_CFQ=y -CONFIG_DEFAULT_DEADLINE=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_ROW=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_IOSCHED="row" # CONFIG_INLINE_SPIN_TRYLOCK is not set # 
CONFIG_INLINE_SPIN_TRYLOCK_BH is not set # CONFIG_INLINE_SPIN_LOCK is not set @@ -624,9 +627,9 @@ CONFIG_CPU_FREQ_TABLE=y # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -1019,8 +1022,8 @@ CONFIG_BT_SCO=y CONFIG_BT_RFCOMM=y CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=y -# CONFIG_BT_BNEP_MC_FILTER is not set -# CONFIG_BT_BNEP_PROTO_FILTER is not set +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_HIDP=y # @@ -1092,6 +1095,9 @@ CONFIG_EXTRA_FIRMWARE="" # CONFIG_SYS_HYPERVISOR is not set CONFIG_GENLOCK=y CONFIG_GENLOCK_MISCDEVICE=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y # CONFIG_CONNECTOR is not set # CONFIG_MTD is not set # CONFIG_PARPORT is not set @@ -1237,7 +1243,7 @@ CONFIG_LIBRA_SDIOIF=m CONFIG_BCMDHD=m CONFIG_BCMDHD_FW_PATH="/system/vendor/firmware/fw_bcmdhd.bin" CONFIG_BCMDHD_NVRAM_PATH="/proc/calibration" -# CONFIG_DHD_USE_STATIC_BUF is not set +CONFIG_DHD_USE_STATIC_BUF=y # CONFIG_DHD_USE_SCHED_SCAN is not set # CONFIG_DHD_ENABLE_P2P is not set # CONFIG_HOSTAP is not set @@ -1876,7 +1882,8 @@ CONFIG_MT9D015=y # Graphics support # # CONFIG_DRM is not set -# CONFIG_ION is not set +CONFIG_ION=y +CONFIG_ION_MSM=y CONFIG_MSM_KGSL=y # CONFIG_MSM_KGSL_CFF_DUMP is not set # CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set @@ -2136,7 +2143,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICE_CLASS=y # CONFIG_USB_DYNAMIC_MINORS is not set CONFIG_USB_SUSPEND=y -# CONFIG_USB_OTG is not set +CONFIG_USB_OTG=y # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set # CONFIG_USB_MON is not set @@ -2243,46 +2250,38 @@ CONFIG_USB_GADGET_DUALSPEED=y # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set CONFIG_USB_G_ANDROID=y -# CONFIG_USB_ANDROID_ACM is not set -CONFIG_USB_ANDROID_ADB=y -CONFIG_USB_ANDROID_DIAG=y -CONFIG_USB_ANDROID_MDM9K_DIAG=y -CONFIG_USB_ANDROID_MDM9K_MODEM=y -CONFIG_USB_ANDROID_MASS_STORAGE=y -CONFIG_USB_ANDROID_MTP=y -CONFIG_USB_ANDROID_RNDIS=y -# CONFIG_USB_ANDROID_RMNET is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +CONFIG_USB_CSW_HACK=y +# CONFIG_USB_MSC_PROFILING is not set +CONFIG_MODEM_SUPPORT=y CONFIG_RMNET_SMD_CTL_CHANNEL="" CONFIG_RMNET_SMD_DATA_CHANNEL="" -# CONFIG_USB_ANDROID_RMNET_SDIO is not set CONFIG_RMNET_SDIO_CTL_CHANNEL=8 CONFIG_RMNET_SDIO_DATA_CHANNEL=8 -CONFIG_USB_ANDROID_RMNET_SMD_SDIO=y CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL=8 CONFIG_RMNET_SMD_SDIO_DATA_CHANNEL=8 CONFIG_RMNET_SDIO_SMD_DATA_CHANNEL="DATA40" # CONFIG_USB_ANDROID_RMNET_CTRL_SMD is not set # CONFIG_USB_F_SERIAL is not set -CONFIG_MODEM_SUPPORT=y -CONFIG_USB_ANDROID_SERIAL=y -CONFIG_USB_ANDROID_PROJECTOR=y -CONFIG_USB_ANDROID_ECM=y # CONFIG_USB_F_SERIAL_SDIO is not set # CONFIG_USB_F_SERIAL_SMD is not set -CONFIG_USB_ANDROID_USBNET=y # CONFIG_USB_CDC_COMPOSITE is not set # CONFIG_USB_G_MULTI is not set # CONFIG_USB_G_HID is not set # CONFIG_USB_G_DBGP is not set # CONFIG_USB_ACCESSORY_DETECT_BY_ADC is not set -# CONFIG_USB_CSW_HACK is not set +CONFIG_USB_CSW_HACK=y 
CONFIG_USB_GADGET_VERIZON_PRODUCT_ID=y +CONFIG_USB_HTC_SWITCH_STUB=y # # OTG and related infrastructure # CONFIG_USB_OTG_UTILS=y -# CONFIG_USB_OTG_WAKELOCK is not set +CONFIG_USB_OTG_WAKELOCK=y # CONFIG_USB_GPIO_VBUS is not set # CONFIG_USB_ULPI is not set # CONFIG_USB_MSM_OTG_72K is not set diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 65c3f2474f5..4e25f188835 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -137,6 +137,11 @@ disable_irq .endm + .macro save_and_disable_irqs_notrace, oldcpsr + mrs \oldcpsr, cpsr + disable_irq_notrace + .endm + /* * Restore interrupt state previously stored in a register. We don't * guarantee that this will preserve the flags. diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 584fe0b6fcd..1ea19850816 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -236,7 +236,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm) static inline void vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } @@ -244,7 +246,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned static inline void vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + struct mm_struct *mm = vma->vm_mm; + + if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = user_addr & PAGE_MASK; __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); } diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c93a22a8b92..152700d63fd 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -23,6 +23,7 @@ #define HWCAP_VFPv4 (1 << 16) #define HWCAP_IDIVA (1 << 17) #define HWCAP_IDIVT (1 << 18) +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index fd3f17ef94a..d34063ced0c 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -8,128 +8,11 @@ #ifndef _ASM_MUTEX_H #define _ASM_MUTEX_H -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include -#else - -/* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap. The idea is to attempt - * decrementing the lock value only once. If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. 
- */ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - fail_fn(count); - else - smp_rmb(); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res; - - __asm__ ( - - "ldrex %0, [%2] \n\t" - "sub %0, %0, #1 \n\t" - "strex %1, %0, [%2] " - - : "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __res |= __ex_flag; - if (unlikely(__res != 0)) - __res = fail_fn(count); - else - smp_rmb(); - - return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - smp_wmb(); - __asm__ ( - - "ldrex %0, [%3] \n\t" - "add %1, %0, #1 \n\t" - "strex %2, %1, [%3] " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&(count)->counter) - : "cc","memory" ); - - __orig |= __ex_flag; - if (unlikely(__orig != 0)) - fail_fn(count); -} - /* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we emit a pair of exclusive + * accesses instead. */ -#define __mutex_slowpath_needs_to_unlock() 1 -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. 
- */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - int __ex_flag, __res, __orig; - - __asm__ ( - - "1: ldrex %0, [%3] \n\t" - "subs %1, %0, #1 \n\t" - "strexeq %2, %1, [%3] \n\t" - "movlt %0, #0 \n\t" - "cmpeq %2, #0 \n\t" - "bgt 1b " - - : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) - : "r" (&count->counter) - : "cc", "memory" ); - if (__orig) - smp_rmb(); - - return __orig; -} - -#endif +#include #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9693d472b0a..07edd7d0cc5 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -376,6 +376,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -387,25 +399,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -432,13 +434,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. 
*/ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 43ba0fb1c8a..559ee24c956 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -127,6 +127,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 60843eb0f61..73409e6c025 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -7,6 +7,8 @@ .macro set_tls_v6k, tp, tmp1, tmp2 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + mov \tmp1, #0 + mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register .endm .macro set_tls_v6, tp, tmp1, tmp2 @@ -15,6 +17,8 @@ mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + movne \tmp1, #0 + mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 .endm diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 3d5fc41ae8d..c49c8f778b5 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -27,9 +27,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field @@ -51,9 +51,9 @@ #if __LINUX_ARM_ARCH__ <= 6 ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] - tst \tmp, #HWCAP_VFPv3D16 - stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} - addne \base, \base, #32*4 @ step over unused register space + tst \tmp, #HWCAP_VFPD32 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + addeq \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 97260060bf2..172ae01c26e 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -719,10 +719,13 @@ static int vfp_set(struct task_struct *target, { int ret; struct thread_info *thread = task_thread_info(target); - struct vfp_hard_struct new_vfp = thread->vfpstate.hard; + struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); + vfp_sync_hwstate(thread); + new_vfp = thread->vfpstate.hard; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, @@ -743,9 +746,8 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - vfp_sync_hwstate(thread); - thread->vfpstate.hard = new_vfp; vfp_flush_hwstate(thread); + thread->vfpstate.hard = new_vfp; return 0; } diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 0340224cf73..9e617bd4a14 100644 --- a/arch/arm/kernel/signal.c +++ 
b/arch/arm/kernel/signal.c @@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; + vfp_flush_hwstate(thread); + /* * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. @@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - if (!err) - vfp_flush_hwstate(thread); - return err ? -EFAULT : 0; } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 5eee27487bd..2da52a42496 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -291,20 +291,26 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu = smp_processor_id(); + unsigned int cpu; - printk("CPU%u: Booted secondary processor\n", cpu); + /* + * The identity mapping is uncached (strongly ordered), so + * switch away from it before attempting any exclusive accesses. + */ + cpu_switch_mm(mm->pgd, mm); + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ + cpu = smp_processor_id(); atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); - cpu_switch_mm(mm->pgd, mm); - enter_lazy_tlb(mm, current); - local_flush_tlb_all(); + + printk("CPU%u: Booted secondary processor\n", cpu); cpu_init(); preempt_disable(); @@ -460,9 +466,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); static void ipi_timer(void) { struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); - irq_enter(); evt->event_handler(evt); - irq_exit(); } #ifdef CONFIG_LOCAL_TIMERS @@ -473,7 +477,9 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs) if (local_timer_ack()) { __inc_irq_stat(cpu, local_timer_irqs); + irq_enter(); ipi_timer(); + irq_exit(); } set_irq_regs(old_regs); @@ -591,7 +597,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs) /* Wake up from WFI/WFE using SGI */ break; case IPI_TIMER: + irq_enter(); ipi_timer(); + irq_exit(); break; case IPI_RESCHEDULE: @@ -599,15 +607,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; case IPI_CALL_FUNC: + irq_enter(); generic_smp_call_function_interrupt(); + irq_exit(); break; case IPI_CALL_FUNC_SINGLE: + irq_enter(); generic_smp_call_function_single_interrupt(); + irq_exit(); break; case IPI_CPU_STOP: + irq_enter(); ipi_cpu_stop(cpu); + irq_exit(); break; default: diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 40ee7e5045e..0951a32493a 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { siginfo_t info; + down_read(&current->mm->mmap_sem); if (find_vma(current->mm, addr) == NULL) info.si_code = SEGV_MAPERR; else info.si_code = SEGV_ACCERR; + up_read(&current->mm->mmap_sem); info.si_signo = SIGSEGV; info.si_errno = 0; diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 62e7c61d034..0264ab433e9 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -115,7 +115,7 @@ int kernel_execve(const char *filename, "Ir" (THREAD_START_SP - sizeof(regs)), "r" (&regs), "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); + : "r0",
"r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); out: return ret; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c new file mode 100644 index 00000000000..939b2d8400b --- /dev/null +++ b/arch/arm/kernel/topology.c @@ -0,0 +1,478 @@ +/* + * arch/arm/kernel/topology.c + * + * Copyright (C) 2011 Linaro Limited. + * Written by: Vincent Guittot + * + * based on arch/sh/kernel/topology.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_DEBUG_FS +#include +#include /* for copy_from_user */ +#endif + +#include +#include + +/* + * cpu power scale management + */ + +/* + * cpu power table + * This per cpu data structure describes the relative capacity of each core. + * On a heteregenous system, cores don't have the same computation capacity + * and we reflect that difference in the cpu_power field so the scheduler can + * take this difference into account during load balance. A per cpu structure + * is preferred because each CPU updates its own cpu_power field during the + * load balance except for idle cores. One idle core is selected to run the + * rebalance_domains for all idle cores and the cpu_power can be updated + * during this sequence. + */ +static DEFINE_PER_CPU(unsigned long, cpu_scale); + +unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +static void set_power_scale(unsigned int cpu, unsigned long power) +{ + per_cpu(cpu_scale, cpu) = power; +} + +/* + * cpu topology management + */ + +#define ARM_FAMILY_MASK 0xFF0FFFF0 + +#define MPIDR_SMP_BITMASK (0x3 << 30) +#define MPIDR_SMP_VALUE (0x2 << 30) + +#define MPIDR_MT_BITMASK (0x1 << 24) + +/* + * These masks reflect the current use of the affinity levels. 
+ * The affinity level can be up to 16 bits according to ARM ARM + */ + +#define MPIDR_LEVEL0_MASK 0x3 +#define MPIDR_LEVEL0_SHIFT 0 + +#define MPIDR_LEVEL1_MASK 0xF +#define MPIDR_LEVEL1_SHIFT 8 + +#define MPIDR_LEVEL2_MASK 0xFF +#define MPIDR_LEVEL2_SHIFT 16 +/* + * CPU topology table + */ +struct cputopo_arm cpu_topology[NR_CPUS]; + +/* + * cpu power scale management + * a per cpu data structure should be better because each cpu is mainly + * using its own cpu_power even it's not always true because of + * nohz_idle_balance + */ + + + +/* + * cpu topology mask update management + */ + +static unsigned int prev_sched_mc_power_savings = 0; +static unsigned int prev_sched_smt_power_savings = 0; + +ATOMIC_NOTIFIER_HEAD(topology_update_notifier_list); + +/* + * Update the cpu power of the scheduler + */ + + +int topology_register_notifier(struct notifier_block *nb) +{ + + return atomic_notifier_chain_register( + &topology_update_notifier_list, nb); +} + +int topology_unregister_notifier(struct notifier_block *nb) +{ + + return atomic_notifier_chain_unregister( + &topology_update_notifier_list, nb); +} + +/* + * sched_domain flag configuration + */ +/* TODO add a config flag for this function */ +int arch_sd_sibling_asym_packing(void) +{ + if (sched_smt_power_savings || sched_mc_power_savings) + return SD_ASYM_PACKING; + return 0; +} + +/* + * default topology function + */ +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_topology[cpu].core_sibling; +} + +/* + * clear cpu topology masks + */ +static void clear_cpu_topology_mask(void) +{ + unsigned int cpuid; + for_each_possible_cpu(cpuid) { + struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]); + cpumask_clear(&cpuid_topo->core_sibling); + cpumask_clear(&cpuid_topo->thread_sibling); + } + smp_wmb(); +} + +/* + * default_cpu_topology_mask set the core and thread mask as described in the + * ARM ARM + */ +static void default_cpu_topology_mask(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; + + if (cpuid_topo->socket_id == cpu_topo->socket_id) { + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id == cpu_topo->core_id) { + cpumask_set_cpu(cpuid, + &cpu_topo->thread_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->thread_sibling); + } + } + } + smp_wmb(); +} + +static void normal_cpu_topology_mask(void) +{ + unsigned int cpuid; + + for_each_possible_cpu(cpuid) { + default_cpu_topology_mask(cpuid); + } + smp_wmb(); +} + +/* + * For Cortex-A9 MPcore, we emulate a multi-package topology in power mode. 
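Before the comment continues below: the grouping rule used by power_cpu_topology_mask_CA9() is simply that CPUs in the same socket with the same parity of cpuid land in the same sibling set, so a hypothetical quad Cortex-A9 splits into two "virtual packages", {cpu0, cpu2} and {cpu1, cpu3}. A throwaway sketch of that parity grouping (editorial illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* hypothetical quad-core Cortex-A9, all CPUs in socket 0 */
	for (int group = 0; group < 2; group++) {
		printf("virtual package %d:", group);
		for (int cpu = 0; cpu < 4; cpu++)
			if ((cpu & 0x1) == group)	/* same parity test as the code below */
				printf(" cpu%d", cpu);
		printf("\n");
	}
	return 0;
}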
+ * The goal is to gathers tasks on 1 virtual package + */ +static void power_cpu_topology_mask_CA9(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; + + if ((cpuid_topo->socket_id == cpu_topo->socket_id) + && ((cpuid & 0x1) == (cpu & 0x1))) { + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id == cpu_topo->core_id) { + cpumask_set_cpu(cpuid, + &cpu_topo->thread_sibling); + if (cpu != cpuid) + cpumask_set_cpu(cpu, + &cpuid_topo->thread_sibling); + } + } + } + smp_wmb(); +} + +static int need_topology_update(void) +{ + int update; + + update = ((prev_sched_mc_power_savings ^ sched_mc_power_savings) + || (prev_sched_smt_power_savings ^ sched_smt_power_savings)); + + prev_sched_mc_power_savings = sched_mc_power_savings; + prev_sched_smt_power_savings = sched_smt_power_savings; + + return update; +} + +#define ARM_CORTEX_A9_FAMILY 0x410FC090 + +/* update_cpu_topology_policy select a cpu topology policy according to the + * available cores. + * TODO: The current version assumes that all cores are exactly the same which + * might not be true. We need to update it to take into account various + * configuration among which system with different kind of core. + */ +static int update_cpu_topology_mask(void) +{ + unsigned long cpuid; + + if (sched_mc_power_savings == POWERSAVINGS_BALANCE_NONE) { + normal_cpu_topology_mask(); + return 0; + } + + for_each_possible_cpu(cpuid) { + struct cputopo_arm *cpuid_topo = &(cpu_topology[cpuid]); + + switch (cpuid_topo->id) { + case ARM_CORTEX_A9_FAMILY: + power_cpu_topology_mask_CA9(cpuid); + break; + default: + default_cpu_topology_mask(cpuid); + break; + } + } + + return 0; +} + +/* + * store_cpu_topology is called at boot when only one cpu is running + * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, + * which prevents simultaneous write access to cpu_topology array + */ +void store_cpu_topology(unsigned int cpuid) +{ + struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + unsigned int mpidr; + + /* If the cpu topology has been already set, just return */ + if (cpuid_topo->core_id != -1) + return; + + mpidr = read_cpuid_mpidr(); + + /* create cpu topology mapping */ + if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) { + /* + * This is a multiprocessor system + * multiprocessor format & multiprocessor mode field are set + */ + + if (mpidr & MPIDR_MT_BITMASK) { + /* core performance interdependency */ + cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT) + & MPIDR_LEVEL0_MASK; + cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT) + & MPIDR_LEVEL1_MASK; + cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT) + & MPIDR_LEVEL2_MASK; + } else { + /* largely independent cores */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT) + & MPIDR_LEVEL0_MASK; + cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT) + & MPIDR_LEVEL1_MASK; + } + + cpuid_topo->id = read_cpuid_id() & ARM_FAMILY_MASK; + + } else { + /* + * This is an uniprocessor system + * we are in multiprocessor format but uniprocessor system + * or in the old uniprocessor format + */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = 0; + cpuid_topo->socket_id = -1; + } + + /* + * The core and thread sibling masks can also be updated during the + * call of arch_update_cpu_topology + */ + 
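As a concrete illustration of the MPIDR decode performed just above, here is a standalone sketch with a made-up MPIDR value; the mask and shift constants are copied from the definitions earlier in this file, and the value 0x80000102 is purely hypothetical (SMP format, MT bit clear).

#include <stdio.h>

#define MPIDR_MT_BITMASK	(0x1 << 24)
#define MPIDR_LEVEL0_MASK	0x3
#define MPIDR_LEVEL0_SHIFT	0
#define MPIDR_LEVEL1_MASK	0xF
#define MPIDR_LEVEL1_SHIFT	8
#define MPIDR_LEVEL2_MASK	0xFF
#define MPIDR_LEVEL2_SHIFT	16

int main(void)
{
	unsigned int mpidr = 0x80000102;	/* hypothetical sample value */
	int thread, core, socket;

	if (mpidr & MPIDR_MT_BITMASK) {		/* multithreaded cores */
		thread = (mpidr >> MPIDR_LEVEL0_SHIFT) & MPIDR_LEVEL0_MASK;
		core   = (mpidr >> MPIDR_LEVEL1_SHIFT) & MPIDR_LEVEL1_MASK;
		socket = (mpidr >> MPIDR_LEVEL2_SHIFT) & MPIDR_LEVEL2_MASK;
	} else {				/* largely independent cores */
		thread = -1;
		core   = (mpidr >> MPIDR_LEVEL0_SHIFT) & MPIDR_LEVEL0_MASK;
		socket = (mpidr >> MPIDR_LEVEL1_SHIFT) & MPIDR_LEVEL1_MASK;
	}

	printf("thread %d, core %d, socket %d\n", thread, core, socket);
	return 0;
}

For 0x80000102 this prints "thread -1, core 2, socket 1", which is the same shape of information the printk at the end of store_cpu_topology() reports for each booted CPU.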
default_cpu_topology_mask(cpuid); + + + printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", + cpuid, cpu_topology[cpuid].thread_id, + cpu_topology[cpuid].core_id, + cpu_topology[cpuid].socket_id, mpidr); +} + +/* + * arch_update_cpu_topology is called by the scheduler before building + * a new sched_domain hierarchy. + */ +int arch_update_cpu_topology(void) +{ + if (!need_topology_update()) + return 0; + + /* clear core threads mask */ + clear_cpu_topology_mask(); + + /* set topology mask */ + update_cpu_topology_mask(); + + /* notify the topology update */ + atomic_notifier_call_chain(&topology_update_notifier_list, + TOPOLOGY_POSTCHANGE, (void *)sched_mc_power_savings); + + return 1; +} + +/* + * init_cpu_topology is called at boot when only one cpu is running + * which prevent simultaneous write access to cpu_topology array + */ +void init_cpu_topology(void) +{ + unsigned int cpu; + + /* init core mask */ + for_each_possible_cpu(cpu) { + struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); + + cpu_topo->id = -1; + cpu_topo->thread_id = -1; + cpu_topo->core_id = -1; + cpu_topo->socket_id = -1; + cpumask_clear(&cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + + set_power_scale(cpu, SCHED_POWER_SCALE); + } + smp_wmb(); +} + +/* + * debugfs interface for scaling cpu power + */ + +#ifdef CONFIG_DEBUG_FS +static struct dentry *topo_debugfs_root; + +static ssize_t dbg_write(struct file *file, const char __user *buf, + size_t size, loff_t *off) +{ + unsigned int *value = file->f_dentry->d_inode->i_private; + char cdata[128]; + unsigned long tmp; + + if (size < (sizeof(cdata)-1)) { + if (copy_from_user(cdata, buf, size)) + return -EFAULT; + cdata[size] = 0; + if (!strict_strtoul(cdata, 10, &tmp)) { + *value = tmp; + } + return size; + } + return -EINVAL; +} + +static ssize_t dbg_read(struct file *file, char __user *buf, + size_t size, loff_t *off) +{ + unsigned int *value = file->f_dentry->d_inode->i_private; + char cdata[128]; + unsigned int len; + + len = sprintf(cdata, "%u\n", *value); + return simple_read_from_buffer(buf, size, off, cdata, len); +} + +static const struct file_operations debugfs_fops = { + .read = dbg_read, + .write = dbg_write, +}; + +static struct dentry *topo_debugfs_register(unsigned int cpu, + struct dentry *parent) +{ + struct dentry *cpu_d, *d; + char cpu_name[16]; + + sprintf(cpu_name, "cpu%u", cpu); + + cpu_d = debugfs_create_dir(cpu_name, parent); + if (!cpu_d) + return NULL; + + d = debugfs_create_file("cpu_power", S_IRUGO | S_IWUGO, + cpu_d, &per_cpu(cpu_scale, cpu), &debugfs_fops); + if (!d) + goto err_out; + + return cpu_d; + +err_out: + debugfs_remove_recursive(cpu_d); + return NULL; +} + +static int __init topo_debugfs_init(void) +{ + struct dentry *d; + unsigned int cpu; + + d = debugfs_create_dir("cpu_topo", NULL); + if (!d) + return -ENOMEM; + topo_debugfs_root = d; + + for_each_possible_cpu(cpu) { + d = topo_debugfs_register(cpu, topo_debugfs_root); + if (d == NULL) + goto err_out; + } + return 0; + +err_out: + debugfs_remove_recursive(topo_debugfs_root); + return -ENOMEM; +} + +late_initcall(topo_debugfs_init); +#endif + diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 7227755ffec..871a818e3db 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -454,7 +454,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91rm9200_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, 
.dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 39f81f47b4b..89a8414d7e2 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -459,7 +459,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9260_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 5004bf0a05f..5d43cf45231 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -276,7 +276,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9261_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index a050f41fc86..2bbd16301a0 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -534,7 +534,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9263_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index aacb19dc922..659870e647f 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -319,7 +319,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9rl_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c index cf7e5985eeb..46c04498629 100644 --- a/arch/arm/mach-dove/common.c +++ b/arch/arm/mach-dove/common.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "common.h" @@ -74,7 +75,7 @@ void __init dove_map_io(void) void __init dove_ehci0_init(void) { orion_ehci_init(&dove_mbus_dram_info, - DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0); + DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0, EHCI_PHY_NA); } /***************************************************************************** diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h index 3ad9f946a9e..11799c33475 100644 --- a/arch/arm/mach-dove/include/mach/pm.h +++ b/arch/arm/mach-dove/include/mach/pm.h @@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin) static inline int irq_to_pmu(int irq) { - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) return irq - IRQ_DOVE_PMU_START; return -EINVAL; diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index f07fd16e0c9..9f2fd100174 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d) int pin = irq_to_pmu(d->irq); u32 u; + /* + * The PMU mask register is not RW0C: it is RW. This means that + * the bits take whatever value is written to them; if you write + * a '1', you will set the interrupt. + * + * Unfortunately this means there is NO race free way to clear + * these interrupts. + * + * So, let's structure the code so that the window is as small as + * possible. 
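To make that race window concrete, here is an editorial sketch, not kernel code: pmu_cause stands in for the PMU_INTERRUPT_CAUSE register, and the "hardware" event is simulated in the middle of the acknowledge. Because the register is plain read/write, any cause bit the hardware raises between the read and the write-back is silently overwritten, which is why the code below folds the read into the write as late as possible.

#include <stdio.h>

static unsigned int pmu_cause;	/* stand-in for the RW cause register */

/* Ack pattern used in pmu_irq_ack(): read, clear one bit, write all back. */
static void ack_pin(int pin)
{
	unsigned int u = ~(1u << (pin & 31));

	u &= pmu_cause;			/* a bit set by hardware after this   */
	pmu_cause |= 1u << 5;		/* point (simulated here) ...         */
	pmu_cause = u;			/* ... is lost by the write-back      */
}

int main(void)
{
	pmu_cause = 1u << 3;		/* pin 3 pending */
	ack_pin(3);
	printf("cause after ack: 0x%x (the simulated pin-5 event vanished)\n",
	       pmu_cause);
	return 0;
}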
+ */ u = ~(1 << (pin & 31)); - writel(u, PMU_INTERRUPT_CAUSE); + u &= readl_relaxed(PMU_INTERRUPT_CAUSE); + writel_relaxed(u, PMU_INTERRUPT_CAUSE); } static struct irq_chip pmu_irq_chip = { diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 74ac88978dd..a37fe021d69 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -32,7 +32,7 @@ * Memory-mapped I/O on MX21ADS base board */ #define MX21ADS_MMIO_BASE_ADDR 0xf5000000 -#define MX21ADS_MMIO_SIZE SZ_16M +#define MX21ADS_MMIO_SIZE 0xc00000 #define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \ (MX21ADS_MMIO_BASE_ADDR + (offset)) diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c index f3248cfbe51..c5dbbb35e0b 100644 --- a/arch/arm/mach-kirkwood/common.c +++ b/arch/arm/mach-kirkwood/common.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include "common.h" @@ -74,7 +75,7 @@ void __init kirkwood_ehci_init(void) { kirkwood_clk_ctrl |= CGC_USB0; orion_ehci_init(&kirkwood_mbus_dram_info, - USB_PHYS_BASE, IRQ_KIRKWOOD_USB); + USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA); } diff --git a/arch/arm/mach-kirkwood/mpp.h b/arch/arm/mach-kirkwood/mpp.h index ac787957e2d..7afccf47220 100644 --- a/arch/arm/mach-kirkwood/mpp.h +++ b/arch/arm/mach-kirkwood/mpp.h @@ -31,313 +31,313 @@ #define MPP_F6282_MASK MPP( 0, 0x0, 0, 0, 0, 0, 0, 0, 1 ) #define MPP0_GPIO MPP( 0, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP0_NF_IO2 MPP( 0, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 1, 1, 1, 1, 1, 1 ) +#define MPP0_NF_IO2 MPP( 0, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 0, 1, 1, 1, 1, 1 ) #define MPP1_GPO MPP( 1, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP1_NF_IO3 MPP( 1, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 1, 1, 1, 1, 1, 1 ) +#define MPP1_NF_IO3 MPP( 1, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 0, 1, 1, 1, 1, 1 ) #define MPP2_GPO MPP( 2, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP2_NF_IO4 MPP( 2, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 1, 1, 1, 1, 1, 1 ) +#define MPP2_NF_IO4 MPP( 2, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 0, 1, 1, 1, 1, 1 ) #define MPP3_GPO MPP( 3, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP3_NF_IO5 MPP( 3, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP3_SPI_MISO MPP( 3, 0x2, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP3_NF_IO5 MPP( 3, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP3_SPI_MISO MPP( 3, 0x2, 0, 0, 1, 1, 1, 1, 1 ) #define MPP4_GPIO MPP( 4, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP4_NF_IO6 MPP( 4, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP4_UART0_RXD MPP( 4, 0x2, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP4_NF_IO6 MPP( 4, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP4_UART0_RXD MPP( 4, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 0, 0, 0, 1, 1, 1 ) #define MPP4_LCD_VGA_HSYNC MPP( 4, 0xb, 0, 0, 0, 0, 0, 0, 1 ) -#define MPP4_PTP_CLK MPP( 4, 0xd, 1, 0, 1, 1, 1, 1, 0 ) +#define MPP4_PTP_CLK MPP( 4, 0xd, 0, 0, 1, 1, 1, 1, 0 ) #define MPP5_GPO MPP( 5, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP5_NF_IO7 MPP( 5, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP5_PTP_TRIG_GEN MPP( 5, 0x4, 0, 1, 1, 1, 1, 1, 0 ) -#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 1, 0, 1, 1, 1, 1 ) +#define MPP5_NF_IO7 MPP( 5, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP5_PTP_TRIG_GEN MPP( 
5, 0x4, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 0, 0, 1, 1, 1, 1 ) #define MPP5_LCD_VGA_VSYNC MPP( 5, 0xb, 0, 0, 0, 0, 0, 0, 1 ) -#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 1, 1, 1, 1, 1, 0 ) +#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 0, 1, 1, 1, 1, 0 ) #define MPP7_GPO MPP( 7, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 1, 1, 1, 1, 1, 0 ) -#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 1, 1, 1, 1, 1, 0 ) -#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 1, 0, 0, 0, 0, 1 ) +#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP8_GPIO MPP( 8, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP8_TW0_SDA MPP( 8, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP8_MII0_RXERR MPP( 8, 0x4, 1, 0, 0, 1, 1, 1, 1 ) -#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP8_PTP_CLK MPP( 8, 0xc, 1, 0, 1, 1, 1, 1, 0 ) -#define MPP8_MII0_COL MPP( 8, 0xd, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP8_TW0_SDA MPP( 8, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP8_MII0_RXERR MPP( 8, 0x4, 0, 0, 0, 1, 1, 1, 1 ) +#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP8_PTP_CLK MPP( 8, 0xc, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP8_MII0_COL MPP( 8, 0xd, 0, 0, 1, 1, 1, 1, 1 ) #define MPP9_GPIO MPP( 9, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP9_TW0_SCK MPP( 9, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP9_UART0_CTS MPP( 9, 0x2, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP9_UART1_CTS MPP( 9, 0x3, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 1, 0, 1, 1, 1, 1 ) -#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 1, 0, 1, 1, 1, 1, 0 ) -#define MPP9_MII0_CRS MPP( 9, 0xd, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP9_TW0_SCK MPP( 9, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP9_UART0_CTS MPP( 9, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP9_UART1_CTS MPP( 9, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 0, 0, 1, 1, 1, 1 ) +#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP9_MII0_CRS MPP( 9, 0xd, 0, 0, 1, 1, 1, 1, 1 ) #define MPP10_GPO MPP( 10, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 1, 1, 1, 1, 1, 0 ) +#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 0, 1, 1, 1, 1, 0 ) #define MPP11_GPIO MPP( 11, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP11_SPI_MISO MPP( 11, 0x2, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP11_UART0_RXD MPP( 11, 0x3, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 1, 0, 1, 1, 1, 1, 0 ) -#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 1, 1, 1, 1, 1, 0 ) 
-#define MPP11_PTP_CLK MPP( 11, 0xd, 1, 0, 1, 1, 1, 1, 0 ) -#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 1, 0, 1, 1, 1, 1 ) +#define MPP11_SPI_MISO MPP( 11, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP11_UART0_RXD MPP( 11, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP11_PTP_CLK MPP( 11, 0xd, 0, 0, 1, 1, 1, 1, 0 ) +#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 0, 0, 1, 1, 1, 1 ) #define MPP12_GPO MPP( 12, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP12_SD_CLK MPP( 12, 0x1, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 1, 0, 0, 0, 0, 1 ) -#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 1, 0, 0, 0, 0, 1 ) -#define MPP12_TW1_SDA MPP( 12, 0xd, 1, 0, 0, 0, 0, 0, 1 ) +#define MPP12_SD_CLK MPP( 12, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP12_TW1_SDA MPP( 12, 0xd, 0, 0, 0, 0, 0, 0, 1 ) #define MPP13_GPIO MPP( 13, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP13_SD_CMD MPP( 13, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 1, 0, 0, 0, 0, 1 ) -#define MPP13_LCDPWM MPP( 13, 0xb, 0, 1, 0, 0, 0, 0, 1 ) +#define MPP13_SD_CMD MPP( 13, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP13_LCDPWM MPP( 13, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP14_GPIO MPP( 14, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP14_SD_D0 MPP( 14, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP14_UART1_RXD MPP( 14, 0x3, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP14_AU_SPDIFI MPP( 14, 0xa, 1, 0, 0, 0, 0, 0, 1 ) -#define MPP14_AU_I2SDI MPP( 14, 0xb, 1, 0, 0, 0, 0, 0, 1 ) -#define MPP14_MII0_COL MPP( 14, 0xd, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP14_SD_D0 MPP( 14, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP14_UART1_RXD MPP( 14, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP14_AU_SPDIFI MPP( 14, 0xa, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP14_AU_I2SDI MPP( 14, 0xb, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP14_MII0_COL MPP( 14, 0xd, 0, 0, 1, 1, 1, 1, 1 ) #define MPP15_GPIO MPP( 15, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP15_SD_D1 MPP( 15, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 1, 0, 1, 1, 1, 1 ) -#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 1, 0, 0, 0, 0, 1 ) +#define MPP15_SD_D1 MPP( 15, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 0, 0, 1, 1, 1, 1 ) +#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP16_GPIO MPP( 16, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP16_SD_D2 MPP( 16, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP16_UART0_CTS MPP( 16, 0x2, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP16_UART1_RXD MPP( 16, 0x3, 1, 0, 1, 1, 1, 1, 1 ) -#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 1, 0, 0, 0, 0, 0, 1 ) -#define MPP16_MII0_CRS MPP( 16, 0xd, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP16_SD_D2 MPP( 16, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP16_UART0_CTS MPP( 16, 0x2, 0, 0, 1, 1, 1, 1, 1 ) +#define 
MPP16_UART1_RXD MPP( 16, 0x3, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP16_MII0_CRS MPP( 16, 0xd, 0, 0, 1, 1, 1, 1, 1 ) #define MPP17_GPIO MPP( 17, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP17_SD_D3 MPP( 17, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 1, 0, 1, 1, 1, 1 ) -#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 1, 0, 0, 0, 0, 1 ) -#define MPP17_TW1_SCK MPP( 17, 0xd, 1, 1, 0, 0, 0, 0, 1 ) +#define MPP17_SD_D3 MPP( 17, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 0, 0, 1, 1, 1, 1 ) +#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 0, 0, 0, 0, 0, 1 ) +#define MPP17_TW1_SCK MPP( 17, 0xd, 0, 0, 0, 0, 0, 0, 1 ) #define MPP18_GPO MPP( 18, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP18_NF_IO0 MPP( 18, 0x1, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 1, 0, 0, 0, 0, 1 ) +#define MPP18_NF_IO0 MPP( 18, 0x1, 0, 0, 1, 1, 1, 1, 1 ) +#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 0, 0, 0, 0, 0, 1 ) #define MPP19_GPO MPP( 19, 0x0, 0, 1, 1, 1, 1, 1, 1 ) -#define MPP19_NF_IO1 MPP( 19, 0x1, 1, 1, 1, 1, 1, 1, 1 ) +#define MPP19_NF_IO1 MPP( 19, 0x1, 0, 0, 1, 1, 1, 1, 1 ) #define MPP20_GPIO MPP( 20, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP20_TSMP0 MPP( 20, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP20_TSMP0 MPP( 20, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP20_GE1_TXD0 MPP( 20, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP20_AU_SPDIFI MPP( 20, 0x4, 1, 0, 0, 0, 1, 1, 1 ) -#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP20_AU_SPDIFI MPP( 20, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 0, 0, 0, 1, 1, 1 ) #define MPP20_LCD_D0 MPP( 20, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP21_GPIO MPP( 21, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP21_TSMP1 MPP( 21, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP21_TSMP1 MPP( 21, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP21_GE1_TXD1 MPP( 21, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 1, 0, 1, 1, 1, 1 ) +#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 0, 0, 1, 1, 1, 1 ) #define MPP21_LCD_D1 MPP( 21, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP22_GPIO MPP( 22, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP22_TSMP2 MPP( 22, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP22_TSMP2 MPP( 22, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP22_GE1_TXD2 MPP( 22, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 0, 0, 0, 1, 1, 1 ) #define MPP22_LCD_D2 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP23_GPIO MPP( 23, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP23_TSMP3 MPP( 23, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 1, 0, 0, 0, 1, 1, 1 ) +#define MPP23_TSMP3 MPP( 23, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 0, 0, 0, 0, 1, 1, 1 ) 
#define MPP23_GE1_TXD3 MPP( 23, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 1, 0, 0, 1, 1, 1 ) -#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 1, 0, 1, 1, 1, 1 ) +#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 0, 0, 1, 1, 1, 1 ) #define MPP23_LCD_D3 MPP( 23, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP24_GPIO MPP( 24, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP24_TSMP4 MPP( 24, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP24_TSMP4 MPP( 24, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP24_GE1_RXD0 MPP( 24, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 0, 0, 0, 1, 1, 1 ) #define MPP24_LCD_D4 MPP( 24, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP25_GPIO MPP( 25, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP25_TSMP5 MPP( 25, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP25_TSMP5 MPP( 25, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP25_GE1_RXD1 MPP( 25, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 0, 0, 0, 1, 1, 1 ) #define MPP25_LCD_D5 MPP( 25, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP26_GPIO MPP( 26, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP26_TSMP6 MPP( 26, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 1, 0, 0, 0, 1, 1, 1 ) +#define MPP26_TSMP6 MPP( 26, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP26_GE1_RXD2 MPP( 26, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 0, 0, 0, 1, 1, 1 ) #define MPP26_LCD_D6 MPP( 26, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP27_GPIO MPP( 27, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP27_TSMP7 MPP( 27, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP27_TSMP7 MPP( 27, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP27_GE1_RXD3 MPP( 27, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP27_AU_I2SDI MPP( 27, 0x4, 1, 0, 0, 0, 1, 1, 1 ) +#define MPP27_AU_I2SDI MPP( 27, 0x4, 0, 0, 0, 0, 1, 1, 1 ) #define MPP27_LCD_D7 MPP( 27, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP28_GPIO MPP( 28, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP28_TSMP8 MPP( 28, 0x1, 1, 1, 0, 0, 1, 1, 1 ) +#define MPP28_TSMP8 MPP( 28, 0x1, 0, 0, 0, 0, 1, 1, 1 ) #define MPP28_TDM_CODEC_INTn MPP( 28, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP28_GE1_COL MPP( 28, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP28_AU_EXTCLK MPP( 28, 0x4, 1, 0, 0, 0, 1, 1, 1 ) +#define MPP28_AU_EXTCLK MPP( 28, 0x4, 0, 0, 0, 0, 1, 1, 1 ) #define MPP28_LCD_D8 MPP( 28, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP29_GPIO MPP( 29, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP29_TSMP9 MPP( 29, 0x1, 1, 1, 0, 0, 1, 1, 1 ) +#define MPP29_TSMP9 MPP( 29, 0x1, 0, 0, 0, 0, 1, 1, 1 ) #define MPP29_TDM_CODEC_RSTn MPP( 29, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP29_GE1_TCLK MPP( 29, 0x3, 0, 0, 0, 1, 1, 1, 1 ) #define MPP29_LCD_D9 MPP( 29, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP30_GPIO MPP( 30, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP30_TSMP10 MPP( 30, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP30_TDM_PCLK MPP( 30, 0x2, 1, 1, 0, 0, 1, 1, 1 ) +#define MPP30_TSMP10 MPP( 30, 0x1, 0, 0, 0, 0, 1, 
1, 1 ) +#define MPP30_TDM_PCLK MPP( 30, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP30_GE1_RXCTL MPP( 30, 0x3, 0, 0, 0, 1, 1, 1, 1 ) #define MPP30_LCD_D10 MPP( 30, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP31_GPIO MPP( 31, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP31_TSMP11 MPP( 31, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP31_TDM_FS MPP( 31, 0x2, 1, 1, 0, 0, 1, 1, 1 ) +#define MPP31_TSMP11 MPP( 31, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP31_TDM_FS MPP( 31, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP31_GE1_RXCLK MPP( 31, 0x3, 0, 0, 0, 1, 1, 1, 1 ) #define MPP31_LCD_D11 MPP( 31, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP32_GPIO MPP( 32, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP32_TSMP12 MPP( 32, 0x1, 1, 1, 0, 0, 1, 1, 1 ) -#define MPP32_TDM_DRX MPP( 32, 0x2, 1, 0, 0, 0, 1, 1, 1 ) +#define MPP32_TSMP12 MPP( 32, 0x1, 0, 0, 0, 0, 1, 1, 1 ) +#define MPP32_TDM_DRX MPP( 32, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP32_GE1_TCLKOUT MPP( 32, 0x3, 0, 0, 0, 1, 1, 1, 1 ) #define MPP32_LCD_D12 MPP( 32, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP33_GPO MPP( 33, 0x0, 0, 1, 0, 1, 1, 1, 1 ) -#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP33_GE1_TXCTL MPP( 33, 0x3, 0, 0, 0, 1, 1, 1, 1 ) #define MPP33_LCD_D13 MPP( 33, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP34_GPIO MPP( 34, 0x0, 1, 1, 0, 1, 1, 1, 1 ) -#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP34_GE1_TXEN MPP( 34, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 1, 0, 0, 0, 1, 1 ) +#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 0, 0, 0, 0, 1, 1 ) #define MPP34_LCD_D14 MPP( 34, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP35_GPIO MPP( 35, 0x0, 1, 1, 1, 1, 1, 1, 1 ) -#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 1, 0, 0, 1, 1, 1 ) +#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 0, 0, 0, 1, 1, 1 ) #define MPP35_GE1_RXERR MPP( 35, 0x3, 0, 0, 0, 1, 1, 1, 1 ) -#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 1, 0, 1, 1, 1, 1 ) +#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 0, 0, 1, 1, 1, 1 ) #define MPP35_LCD_D15 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) -#define MPP35_MII0_RXERR MPP( 35, 0xc, 1, 0, 1, 1, 1, 1, 1 ) +#define MPP35_MII0_RXERR MPP( 35, 0xc, 0, 0, 1, 1, 1, 1, 1 ) #define MPP36_GPIO MPP( 36, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP36_TSMP0 MPP( 36, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP36_AU_SPDIFI MPP( 36, 0x4, 1, 0, 1, 0, 0, 1, 1 ) -#define MPP36_TW1_SDA MPP( 36, 0xb, 1, 1, 0, 0, 0, 0, 1 ) +#define MPP36_TSMP0 MPP( 36, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP36_AU_SPDIFI MPP( 36, 0x4, 0, 0, 1, 0, 0, 1, 1 ) +#define MPP36_TW1_SDA MPP( 36, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP37_GPIO MPP( 37, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP37_TSMP1 MPP( 37, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 1, 1, 0, 0, 1, 1 ) -#define MPP37_TW1_SCK MPP( 37, 0xb, 1, 1, 0, 0, 0, 0, 1 ) +#define MPP37_TSMP1 MPP( 37, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 0, 1, 0, 0, 1, 1 ) +#define MPP37_TW1_SCK MPP( 37, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP38_GPIO MPP( 38, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP38_TSMP2 MPP( 38, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP38_AU_SPDIFRMLCLK MPP( 38, 
0x4, 0, 1, 1, 0, 0, 1, 1 ) +#define MPP38_TSMP2 MPP( 38, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP38_AU_SPDIFRMLCLK MPP( 38, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP38_LCD_D18 MPP( 38, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP39_GPIO MPP( 39, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP39_TSMP3 MPP( 39, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 1, 1, 0, 0, 1, 1 ) +#define MPP39_TSMP3 MPP( 39, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP39_LCD_D19 MPP( 39, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP40_GPIO MPP( 40, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP40_TSMP4 MPP( 40, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 1, 1, 0, 0, 1, 1 ) +#define MPP40_TSMP4 MPP( 40, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP40_LCD_D20 MPP( 40, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP41_GPIO MPP( 41, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP41_TSMP5 MPP( 41, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 1, 0, 0, 0, 0, 1, 1 ) -#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 1, 1, 0, 0, 1, 1 ) +#define MPP41_TSMP5 MPP( 41, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP41_LCD_D21 MPP( 41, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP42_GPIO MPP( 42, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP42_TSMP6 MPP( 42, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 1, 1, 0, 0, 1, 1 ) +#define MPP42_TSMP6 MPP( 42, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP42_LCD_D22 MPP( 42, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP43_GPIO MPP( 43, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP43_TSMP7 MPP( 43, 0x1, 1, 1, 0, 0, 0, 1, 1 ) +#define MPP43_TSMP7 MPP( 43, 0x1, 0, 0, 0, 0, 0, 1, 1 ) #define MPP43_TDM_CODEC_INTn MPP( 43, 0x2, 0, 0, 0, 0, 0, 1, 1 ) -#define MPP43_AU_I2SDI MPP( 43, 0x4, 1, 0, 1, 0, 0, 1, 1 ) +#define MPP43_AU_I2SDI MPP( 43, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP43_LCD_D23 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP44_GPIO MPP( 44, 0x0, 1, 1, 1, 0, 0, 1, 1 ) -#define MPP44_TSMP8 MPP( 44, 0x1, 1, 1, 0, 0, 0, 1, 1 ) +#define MPP44_TSMP8 MPP( 44, 0x1, 0, 0, 0, 0, 0, 1, 1 ) #define MPP44_TDM_CODEC_RSTn MPP( 44, 0x2, 0, 0, 0, 0, 0, 1, 1 ) -#define MPP44_AU_EXTCLK MPP( 44, 0x4, 1, 0, 1, 0, 0, 1, 1 ) +#define MPP44_AU_EXTCLK MPP( 44, 0x4, 0, 0, 1, 0, 0, 1, 1 ) #define MPP44_LCD_CLK MPP( 44, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP45_GPIO MPP( 45, 0x0, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP45_TSMP9 MPP( 45, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP45_TDM_PCLK MPP( 45, 0x2, 1, 1, 0, 0, 0, 1, 1 ) +#define MPP45_TSMP9 MPP( 45, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP45_TDM_PCLK MPP( 45, 0x2, 0, 0, 0, 0, 0, 1, 1 ) #define MPP245_LCD_E MPP( 45, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP46_GPIO MPP( 46, 0x0, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP46_TSMP10 MPP( 46, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP46_TDM_FS MPP( 46, 0x2, 1, 1, 0, 0, 0, 1, 1 ) +#define MPP46_TSMP10 MPP( 
46, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP46_TDM_FS MPP( 46, 0x2, 0, 0, 0, 0, 0, 1, 1 ) #define MPP46_LCD_HSYNC MPP( 46, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP47_GPIO MPP( 47, 0x0, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP47_TSMP11 MPP( 47, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP47_TDM_DRX MPP( 47, 0x2, 1, 0, 0, 0, 0, 1, 1 ) +#define MPP47_TSMP11 MPP( 47, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP47_TDM_DRX MPP( 47, 0x2, 0, 0, 0, 0, 0, 1, 1 ) #define MPP47_LCD_VSYNC MPP( 47, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP48_GPIO MPP( 48, 0x0, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP48_TSMP12 MPP( 48, 0x1, 1, 1, 0, 0, 0, 1, 1 ) -#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 1, 0, 0, 0, 1, 1 ) +#define MPP48_TSMP12 MPP( 48, 0x1, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 0, 0, 0, 0, 1, 1 ) #define MPP48_LCD_D16 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP49_GPIO MPP( 49, 0x0, 1, 1, 0, 0, 0, 1, 0 ) #define MPP49_GPO MPP( 49, 0x0, 0, 1, 0, 0, 0, 0, 1 ) -#define MPP49_TSMP9 MPP( 49, 0x1, 1, 1, 0, 0, 0, 1, 0 ) -#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 1, 0, 0, 0, 1, 1 ) -#define MPP49_PTP_CLK MPP( 49, 0x5, 1, 0, 0, 0, 0, 1, 0 ) -#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 1, 0, 0, 0, 0, 1 ) +#define MPP49_TSMP9 MPP( 49, 0x1, 0, 0, 0, 0, 0, 1, 0 ) +#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 0, 0, 0, 0, 1, 1 ) +#define MPP49_PTP_CLK MPP( 49, 0x5, 0, 0, 0, 0, 0, 1, 0 ) +#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 0, 0, 0, 0, 0, 1 ) #define MPP49_LCD_D17 MPP( 49, 0xb, 0, 0, 0, 0, 0, 0, 1 ) #define MPP_MAX 49 diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c index 68f32f2bf55..eb1a7bae95c 100644 --- a/arch/arm/mach-kirkwood/ts219-setup.c +++ b/arch/arm/mach-kirkwood/ts219-setup.c @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) static int __init ts219_pci_init(void) { if (machine_is_ts219()) - kirkwood_pcie_init(KW_PCIE0); + kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); return 0; } diff --git a/arch/arm/mach-lpc32xx/include/mach/irqs.h b/arch/arm/mach-lpc32xx/include/mach/irqs.h index 2667f52e3b0..9e3b90df32e 100644 --- a/arch/arm/mach-lpc32xx/include/mach/irqs.h +++ b/arch/arm/mach-lpc32xx/include/mach/irqs.h @@ -61,7 +61,7 @@ */ #define IRQ_LPC32XX_JTAG_COMM_TX LPC32XX_SIC1_IRQ(1) #define IRQ_LPC32XX_JTAG_COMM_RX LPC32XX_SIC1_IRQ(2) -#define IRQ_LPC32XX_GPI_11 LPC32XX_SIC1_IRQ(4) +#define IRQ_LPC32XX_GPI_28 LPC32XX_SIC1_IRQ(4) #define IRQ_LPC32XX_TS_P LPC32XX_SIC1_IRQ(6) #define IRQ_LPC32XX_TS_IRQ LPC32XX_SIC1_IRQ(7) #define IRQ_LPC32XX_TS_AUX LPC32XX_SIC1_IRQ(8) diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index 4eae566dfdc..c74de01ab5b 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -118,6 +118,10 @@ static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = { .event_group = &lpc32xx_event_pin_regs, .mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT, }, + [IRQ_LPC32XX_GPI_28] = { + .event_group = &lpc32xx_event_pin_regs, + .mask = LPC32XX_CLKPWR_EXTSRC_GPI_28_BIT, + }, [IRQ_LPC32XX_GPIO_00] = { .event_group = &lpc32xx_event_int_regs, .mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT, @@ -305,9 +309,18 @@ static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state) if (state) eventreg |= lpc32xx_events[d->irq].mask; - else + else { eventreg &= ~lpc32xx_events[d->irq].mask; + /* + * When disabling the wakeup, clear the latched + * event + */ + __raw_writel(lpc32xx_events[d->irq].mask, + lpc32xx_events[d->irq]. 
+ event_group->rawstat_reg); + } + __raw_writel(eventreg, lpc32xx_events[d->irq].event_group->enab_reg); @@ -380,13 +393,15 @@ void __init lpc32xx_init_irq(void) /* Setup SIC1 */ __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE)); - __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE)); - __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE)); + __raw_writel(SIC1_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE)); + __raw_writel(SIC1_ATR_DEFAULT, + LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE)); /* Setup SIC2 */ __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE)); - __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE)); - __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE)); + __raw_writel(SIC2_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE)); + __raw_writel(SIC2_ATR_DEFAULT, + LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE)); /* Configure supported IRQ's */ for (i = 0; i < NR_IRQS; i++) { diff --git a/arch/arm/mach-lpc32xx/serial.c b/arch/arm/mach-lpc32xx/serial.c index 429cfdbb2b3..f2735281616 100644 --- a/arch/arm/mach-lpc32xx/serial.c +++ b/arch/arm/mach-lpc32xx/serial.c @@ -88,6 +88,7 @@ struct uartinit { char *uart_ck_name; u32 ck_mode_mask; void __iomem *pdiv_clk_reg; + resource_size_t mapbase; }; static struct uartinit uartinit_data[] __initdata = { @@ -97,6 +98,7 @@ static struct uartinit uartinit_data[] __initdata = { .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5), .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL, + .mapbase = LPC32XX_UART5_BASE, }, #endif #ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT @@ -105,6 +107,7 @@ static struct uartinit uartinit_data[] __initdata = { .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3), .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL, + .mapbase = LPC32XX_UART3_BASE, }, #endif #ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT @@ -113,6 +116,7 @@ static struct uartinit uartinit_data[] __initdata = { .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4), .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL, + .mapbase = LPC32XX_UART4_BASE, }, #endif #ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT @@ -121,6 +125,7 @@ static struct uartinit uartinit_data[] __initdata = { .ck_mode_mask = LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6), .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL, + .mapbase = LPC32XX_UART6_BASE, }, #endif }; @@ -165,11 +170,24 @@ void __init lpc32xx_serial_init(void) /* pre-UART clock divider set to 1 */ __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg); + + /* + * Force a flush of the RX FIFOs to work around a + * HW bug + */ + puart = uartinit_data[i].mapbase; + __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); + __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart)); + j = LPC32XX_SUART_FIFO_SIZE; + while (j--) + tmp = __raw_readl( + LPC32XX_UART_DLL_FIFO(puart)); + __raw_writel(0, LPC32XX_UART_IIR_FCR(puart)); } /* This needs to be done after all UART clocks are setup */ __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE); - for (i = 0; i < ARRAY_SIZE(uartinit_data) - 1; i++) { + for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) { /* Force a flush of the RX FIFOs to work around a HW bug */ puart = serial_std_platform_data[i].mapbase; __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart)); diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index f7de361bbb5..776edcc7d1a 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -169,7 +169,7 @@ obj-$(CONFIG_MSM7KV2_AUDIO) += qdsp5v2_2x/ obj-$(CONFIG_MSM7KV2_AUDIO) += 
htc_acoustic_7x30.o htc_acdb_7x30.o obj-$(CONFIG_MSM_QDSP6) += qdsp6/ -obj-$(CONFIG_MSM8X60_AUDIO) += qdsp6v2_1x/ +obj-$(CONFIG_MSM8X60_AUDIO) += qdsp6v3/ obj-$(CONFIG_MSM_AUDIO_QDSP6) += qdsp6v2/ obj-$(CONFIG_MSM_HW3D) += hw3d.o ifdef CONFIG_PM diff --git a/arch/arm/mach-msm/board-vigor-audio.c b/arch/arm/mach-msm/board-vigor-audio.c index beac20b8758..b96a4d904b6 100644 --- a/arch/arm/mach-msm/board-vigor-audio.c +++ b/arch/arm/mach-msm/board-vigor-audio.c @@ -19,16 +19,17 @@ #include #include #include +#include #include #include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include @@ -326,7 +327,7 @@ int vigor_is_msm_i2s_slave(void) int vigor_support_aic3254(void) { - return 0; + return 1; } int vigor_support_adie(void) @@ -336,7 +337,7 @@ int vigor_support_adie(void) int vigor_support_back_mic(void) { - return 0; + return 1; } int vigor_is_msm_i2s_master(void) @@ -404,10 +405,6 @@ static struct dev_ctrl_ops dops = { .support_opendsp = vigor_support_opendsp, }; -static struct q6asm_ops qops = { - .get_q6_effect = vigor_get_q6_effect_mode, -}; - void __init vigor_audio_init(void) { int i = 0; @@ -419,7 +416,6 @@ void __init vigor_audio_init(void) htc_8x60_register_ecodec_ops(&eops); htc_8x60_register_icodec_ops(&iops); htc_8x60_register_dev_ctrl_ops(&dops); - htc_8x60_register_q6asm_ops(&qops); acoustic_register_ops(&acoustic); /* PMIC GPIO Init (See board-vigor.c) */ diff --git a/arch/arm/mach-msm/board-vigor.c b/arch/arm/mach-msm/board-vigor.c index bae59260f3e..9a86563b7ec 100644 --- a/arch/arm/mach-msm/board-vigor.c +++ b/arch/arm/mach-msm/board-vigor.c @@ -84,7 +84,6 @@ #include #include #endif -#include #include #ifdef CONFIG_MSM_DSPS #include @@ -100,7 +99,7 @@ #include #include #ifdef CONFIG_USB_G_ANDROID -#include +#include #include #endif #include @@ -135,6 +134,9 @@ #include #include +#include +#include + #ifdef CONFIG_PERFLOCK #include #endif @@ -346,6 +348,10 @@ int __init vigor_init_panel(struct resource *res, size_t size); int set_two_phase_freq(int cpufreq); #endif +#ifdef CONFIG_ION_MSM +static struct platform_device ion_dev; +#endif + #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT static void (*sdc2_status_notify_cb)(int card_present, void *dev_id); static void *sdc2_status_notify_cb_devid; @@ -1654,19 +1660,7 @@ static int vigor_usb_product_id_match(int product_id, int intrsharing) #endif static struct android_usb_platform_data android_usb_pdata = { - .vendor_id = 0x0BB4, - .product_id = 0x0ccd, - .version = 0x0100, - .product_name = "Android Phone", - .manufacturer_name = "HTC", - .num_products = ARRAY_SIZE(usb_products), - .products = usb_products, - .num_functions = ARRAY_SIZE(usb_functions_all), - .functions = usb_functions_all, - .enable_fast_charge = NULL, .update_pid_and_serial_num = usb_diag_update_pid_and_serial_num, - .usb_id_pin_gpio = VIGOR_GPIO_USB_ID, - .fserial_init_string = "smd:modem,sdio:modem_mdm,tty,tty,tty:serial", #ifdef CONFIG_USB_GADGET_VERIZON_PRODUCT_ID .match = vigor_usb_product_id_match, #endif @@ -1683,7 +1677,6 @@ static struct platform_device android_usb_device = { static int __init board_serialno_setup(char *serialno) { - android_usb_pdata.serial_number = serialno; return 1; } __setup("androidboot.serialno=", board_serialno_setup); @@ -1691,24 +1684,9 @@ __setup("androidboot.serialno=", board_serialno_setup); static void vigor_add_usb_devices(void) { printk(KERN_INFO "%s rev: %d\n", __func__, system_rev); - android_usb_pdata.products[0].product_id = - 
android_usb_pdata.product_id; config_vigor_mhl_gpios(); - /* diag bit set */ - if (get_radio_flag() & 0x20000) { - android_usb_pdata.diag_init = 1; - android_usb_pdata.modem_init = 1; - android_usb_pdata.rmnet_init = 1; - } - - /* add cdrom support in normal mode */ - if (board_mfg_mode() == 0) { - android_usb_pdata.nluns = 3; - android_usb_pdata.cdrom_lun = 0x4; - } - msm_device_gadget_peripheral.dev.parent = &msm_device_otg.dev; platform_device_register(&msm_device_gadget_peripheral); platform_device_register(&android_usb_device); @@ -3162,6 +3140,7 @@ early_param("pmem_audio_size", pmem_audio_size_setup); #endif #ifdef CONFIG_ANDROID_PMEM +#ifndef CONFIG_ION_MSM static struct android_pmem_platform_data android_pmem_sf_pdata = { .name = "pmem", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, @@ -3174,7 +3153,7 @@ static struct platform_device android_pmem_sf_device = { .id = 1, .dev = { .platform_data = &android_pmem_sf_pdata }, }; - +#endif static struct android_pmem_platform_data android_pmem_adsp_pdata = { .name = "pmem_adsp", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, @@ -3214,6 +3193,39 @@ static struct platform_device android_pmem_audio_device = { .dev = { .platform_data = &android_pmem_audio_pdata }, }; +#ifdef CONFIG_ION_MSM +static struct ion_co_heap_pdata co_ion_pdata = { + .adjacent_mem_id = INVALID_HEAP_ID, + .align = PAGE_SIZE, +}; + +static struct ion_platform_data ion_pdata = { + .nr = 2, + .heaps = { + { + .id = ION_SYSTEM_HEAP_ID, + .type = ION_HEAP_TYPE_SYSTEM, + .name = ION_VMALLOC_HEAP_NAME, + }, + { + .id = ION_SF_HEAP_ID, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = ION_SF_HEAP_NAME, + .base = MSM_PMEM_SF_BASE, + .size = MSM_PMEM_SF_SIZE, + .memory_type = ION_EBI_TYPE, + .extra_data = (void *)&co_ion_pdata, + }, + } +}; + +static struct platform_device ion_dev = { + .name = "ion-msm", + .id = 1, + .dev = { .platform_data = &ion_pdata }, +}; +#endif + #define PMEM_BUS_WIDTH(_bw) \ { \ .vectors = &(struct msm_bus_vectors){ \ @@ -7545,7 +7557,9 @@ static struct platform_device *vigor_devices[] __initdata = { &msm_batt_device, #endif #ifdef CONFIG_ANDROID_PMEM +#ifndef CONFIG_ION_MSM &android_pmem_sf_device, +#endif &android_pmem_adsp_device, &android_pmem_adsp2_device, &android_pmem_audio_device, @@ -7633,6 +7647,9 @@ static struct platform_device *vigor_devices[] __initdata = { &msm_device_sdio_al, #endif &msm8660_device_watchdog, +#ifdef CONFIG_ION_MSM + &ion_dev, +#endif }; static struct memtype_reserve msm8x60_reserve_table[] __initdata = { @@ -7680,8 +7697,10 @@ static void __init size_pmem_devices(void) size_pmem_device(&android_pmem_adsp2_pdata, MSM_PMEM_ADSP2_BASE, MSM_PMEM_ADSP2_SIZE); size_pmem_device(&android_pmem_smipool_pdata, MSM_PMEM_SMIPOOL_BASE, MSM_PMEM_SMIPOOL_SIZE); size_pmem_device(&android_pmem_audio_pdata, MSM_PMEM_AUDIO_BASE, pmem_audio_size); +#ifndef CONFIG_ION_MSM size_pmem_device(&android_pmem_sf_pdata, MSM_PMEM_SF_BASE, pmem_sf_size); #endif +#endif } #ifdef CONFIG_ANDROID_PMEM @@ -7703,8 +7722,10 @@ static void __init reserve_pmem_memory(void) reserve_memory_for(&android_pmem_adsp_pdata); reserve_memory_for(&android_pmem_smipool_pdata); reserve_memory_for(&android_pmem_audio_pdata); +#ifndef CONFIG_ION_MSM reserve_memory_for(&android_pmem_sf_pdata); #endif +#endif } static void __init msm8x60_calculate_reserve_sizes(void) diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c index 1cda5103e6f..86d95b74cc0 100644 --- a/arch/arm/mach-msm/clock-8x60.c +++ b/arch/arm/mach-msm/clock-8x60.c @@ -3777,6 +3777,12 @@ static 
struct clk_lookup msm_clocks_8x60[] = { CLK_LOOKUP("iommu_clk", gfx2d0_clk.c, "msm_iommu.10"), CLK_LOOKUP("iommu_clk", gfx2d1_clk.c, "msm_iommu.11"), + CLK_LOOKUP("mdp_iommu_clk", mdp_axi_clk.c, "msm_vidc.0"), + CLK_LOOKUP("rot_iommu_clk", rot_axi_clk.c, "msm_vidc.0"), + CLK_LOOKUP("vcodec_iommu0_clk", vcodec_axi_clk.c, "msm_vidc.0"), + CLK_LOOKUP("vcodec_iommu1_clk", vcodec_axi_clk.c, "msm_vidc.0"), + CLK_LOOKUP("smmu_iface_clk", smmu_p_clk.c, "msm_vidc.0"), + CLK_LOOKUP("dfab_dsps_clk", dfab_dsps_clk.c, NULL), CLK_LOOKUP("core_clk", dfab_usb_hs_clk.c, "msm_otg"), CLK_LOOKUP("bus_clk", dfab_sdc1_clk.c, "msm_sdcc.1"), diff --git a/arch/arm/mach-msm/clock-8x60.h b/arch/arm/mach-msm/clock-8x60.h new file mode 100644 index 00000000000..e9effae0ac8 --- /dev/null +++ b/arch/arm/mach-msm/clock-8x60.h @@ -0,0 +1,253 @@ +/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __ARCH_ARM_MACH_MSM_CLOCK_8X60_H +#define __ARCH_ARM_MACH_MSM_CLOCK_8X60_H + +#include "clock-local.h" + +enum { + /* Peripheral Clocks */ + L_GSBI1_UART_CLK, //0 + L_GSBI2_UART_CLK, + L_GSBI3_UART_CLK, + L_GSBI4_UART_CLK, + L_GSBI5_UART_CLK, + L_GSBI6_UART_CLK, + L_GSBI7_UART_CLK, + L_GSBI8_UART_CLK, + L_GSBI9_UART_CLK, + L_GSBI10_UART_CLK, + L_GSBI11_UART_CLK, //10 + L_GSBI12_UART_CLK, + L_GSBI1_QUP_CLK, + L_GSBI2_QUP_CLK, + L_GSBI3_QUP_CLK, + L_GSBI4_QUP_CLK, + L_GSBI5_QUP_CLK, + L_GSBI6_QUP_CLK, + L_GSBI7_QUP_CLK, + L_GSBI8_QUP_CLK, + L_GSBI9_QUP_CLK, //20 + L_GSBI10_QUP_CLK, + L_GSBI11_QUP_CLK, + L_GSBI12_QUP_CLK, + L_PDM_CLK, + L_PMEM_CLK, + L_PRNG_CLK, + L_SDC1_CLK, + L_SDC2_CLK, + L_SDC3_CLK, + L_SDC4_CLK, //30 + L_SDC5_CLK, + L_TSIF_REF_CLK, + L_TSSC_CLK, + L_USB_HS1_XCVR_CLK, + L_USB_PHY0_CLK, + L_USB_FS1_SRC_CLK, + L_USB_FS1_XCVR_CLK, + L_USB_FS1_SYS_CLK, + L_USB_FS2_SRC_CLK, + L_USB_FS2_XCVR_CLK, //40 + L_USB_FS2_SYS_CLK, + + /* HW-Voteable Clocks */ + L_ADM0_CLK, + L_ADM0_P_CLK, + L_ADM1_CLK, + L_ADM1_P_CLK, + L_MODEM_AHB1_P_CLK, + L_MODEM_AHB2_P_CLK, + L_PMIC_ARB0_P_CLK, + L_PMIC_ARB1_P_CLK, + L_PMIC_SSBI2_CLK, //50 + L_RPM_MSG_RAM_P_CLK, + + /* Fast Peripheral Bus Clocks */ + L_CE2_P_CLK, + L_GSBI1_P_CLK, + L_GSBI2_P_CLK, + L_GSBI3_P_CLK, + L_GSBI4_P_CLK, + L_GSBI5_P_CLK, + L_GSBI6_P_CLK, + L_GSBI7_P_CLK, + L_GSBI8_P_CLK, //60 + L_GSBI9_P_CLK, + L_GSBI10_P_CLK, + L_GSBI11_P_CLK, + L_GSBI12_P_CLK, + L_PPSS_P_CLK, + L_TSIF_P_CLK, + L_USB_FS1_P_CLK, + L_USB_FS2_P_CLK, + L_USB_HS1_P_CLK, + L_SDC1_P_CLK, //70 + L_SDC2_P_CLK, + L_SDC3_P_CLK, + L_SDC4_P_CLK, + L_SDC5_P_CLK, + + /* Multimedia Clocks */ + L_AMP_CLK, + L_CAM_CLK, + L_CSI_SRC_CLK, + L_CSI0_CLK, + L_CSI1_CLK, + L_DSI_BYTE_CLK, //80 + L_DSI_ESC_CLK, + L_GFX2D0_CLK, + L_GFX2D1_CLK, + L_GFX3D_CLK, + L_IJPEG_CLK, + L_JPEGD_CLK, + L_MDP_CLK, + L_MDP_VSYNC_CLK, + L_PIXEL_SRC_CLK, + L_PIXEL_MDP_CLK, //90 + L_PIXEL_LCDC_CLK, + L_ROT_CLK, + L_TV_SRC_CLK, + L_TV_ENC_CLK, + L_TV_DAC_CLK, + L_VCODEC_CLK, + L_MDP_TV_CLK, + L_HDMI_TV_CLK, + L_HDMI_APP_CLK, + L_VPE_CLK, //100 + L_VFE_CLK, + L_CSI0_VFE_CLK, + L_CSI1_VFE_CLK, + L_GMEM_AXI_CLK, + L_IJPEG_AXI_CLK, + L_IMEM_AXI_CLK, + L_JPEGD_AXI_CLK, + L_VCODEC_AXI_CLK, + L_VFE_AXI_CLK, + L_MDP_AXI_CLK, //110 + L_ROT_AXI_CLK, + L_VPE_AXI_CLK, + + /* Multimedia Fast Peripheral Bus Clocks */ + L_AMP_P_CLK, + L_CSI0_P_CLK, + L_CSI1_P_CLK, + L_DSI_M_P_CLK, + L_DSI_S_P_CLK, + L_GFX2D0_P_CLK, + L_GFX2D1_P_CLK, + L_GFX3D_P_CLK, //120 + L_HDMI_M_P_CLK, + L_HDMI_S_P_CLK, + L_IJPEG_P_CLK, + L_IMEM_P_CLK, + L_JPEGD_P_CLK, + L_MDP_P_CLK, + L_ROT_P_CLK, + L_SMMU_P_CLK, + L_TV_ENC_P_CLK, + L_VCODEC_P_CLK, //130 + L_VFE_P_CLK, + L_VPE_P_CLK, + + /* LPA Clocks */ + L_MI2S_SRC_CLK, + L_MI2S_OSR_CLK, + L_MI2S_BIT_CLK, + L_CODEC_I2S_MIC_OSR_CLK, + L_CODEC_I2S_MIC_BIT_CLK, + L_SPARE_I2S_MIC_OSR_CLK, + L_SPARE_I2S_MIC_BIT_CLK, + L_CODEC_I2S_SPKR_OSR_CLK, //140 + L_CODEC_I2S_SPKR_BIT_CLK, + L_SPARE_I2S_SPKR_OSR_CLK, + L_SPARE_I2S_SPKR_BIT_CLK, + L_PCM_CLK, + + /* Measurement-only Clocks */ + L_SC0_DIV2_M_CLK, + L_SC1_DIV2_M_CLK, + L_L2_DIV2_M_CLK, + L_AFAB_M_CLK, + L_SFAB_M_CLK, + L_EBI1_2X_M_CLK, + L_CFPB0_M_CLK, + L_CFPB1_M_CLK, + L_CFPB2_M_CLK, + L_DFAB_M_CLK, + L_SFPB_M_CLK, + L_MMFAB_M_CLK, + L_SMI_DDR2X_M_CLK, + L_MMFPB_M_CLK, + + L_NR_CLKS //145 +}; + +enum clk_sources { + PLL_0 = 0, + PLL_1, + PLL_2, + PLL_3, + PLL_4, + PLL_6, + PLL_7, + PLL_8, + PXO, + CXO, + NUM_SRC +}; + +/*extern struct clk_local soc_clk_local_tbl_mxo[];*/ + +struct pll_rate { + const uint32_t l_val; + const 
uint32_t m_val; + const uint32_t n_val; + const uint32_t vco; + const uint32_t post_div; + const uint32_t i_bits; +}; +#define PLL_RATE(l, m, n, v, d, i) { l, m, n, v, (d>>1), i } + +extern struct clk_ops soc_clk_ops_8x60; +#define CLK_8X60(clk_name, clk_id, clk_dev, clk_flags) { \ + .con_id = clk_name, \ + .dev_id = clk_dev, \ + .clk = &(struct clk){ \ + .id = L_##clk_id, \ + .ops = &soc_clk_ops_8x60, \ + .flags = clk_flags, \ + .dbg_name = #clk_id, \ + .name = clk_name, \ + }, \ + } + +void soc_clk_src_votes_show(void); + +#endif diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c index 46f7d668aef..e8eb4826876 100644 --- a/arch/arm/mach-msm/devices-msm8x60.c +++ b/arch/arm/mach-msm/devices-msm8x60.c @@ -2396,10 +2396,12 @@ struct msm_vidc_platform_data vidc_platform_data = { #ifdef CONFIG_MSM_BUS_SCALING .vidc_bus_client_pdata = &vidc_bus_client_data, #endif - .memtype = MEMTYPE_SMI_KERNEL, #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION + .memtype = ION_CP_MM_HEAP_ID, .enable_ion = 1, + .cp_enabled = 0, #else + .memtype = MEMTYPE_SMI_KERNEL, .enable_ion = 0, #endif .disable_dmx = 0, diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h index b241ee6857d..5b8afbb80ee 100644 --- a/arch/arm/mach-msm/include/mach/board.h +++ b/arch/arm/mach-msm/include/mach/board.h @@ -517,6 +517,7 @@ struct msm_vidc_platform_data { u32 enable_ion; int disable_dmx; int disable_fullhd; + u32 cp_enabled; #ifdef CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *vidc_bus_client_pdata; #endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h b/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h new file mode 100644 index 00000000000..19f1860aa48 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/apr.h @@ -0,0 +1,189 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __APR_H_ +#define __APR_H_ + +#define APR_Q6_NOIMG 0 +#define APR_Q6_LOADING 1 +#define APR_Q6_LOADED 2 + +struct apr_q6 { + void *pil; + uint32_t state; + struct mutex lock; +}; + +struct apr_hdr { + uint16_t hdr_field; + uint16_t pkt_size; + uint8_t src_svc; + uint8_t src_domain; + uint16_t src_port; + uint8_t dest_svc; + uint8_t dest_domain; + uint16_t dest_port; + uint32_t token; + uint32_t opcode; +}; + +#define APR_HDR_LEN(hdr_len) ((hdr_len)/4) +#define APR_PKT_SIZE(hdr_len, payload_len) ((hdr_len) + (payload_len)) +#define APR_HDR_FIELD(msg_type, hdr_len, ver)\ + (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF)) + +#define APR_HDR_SIZE sizeof(struct apr_hdr) + +/* Version */ +#define APR_PKT_VER 0x0 + +/* Command and Response Types */ +#define APR_MSG_TYPE_EVENT 0x0 +#define APR_MSG_TYPE_CMD_RSP 0x1 +#define APR_MSG_TYPE_SEQ_CMD 0x2 +#define APR_MSG_TYPE_NSEQ_CMD 0x3 +#define APR_MSG_TYPE_MAX 0x04 + +/* APR Basic Response Message */ +#define APR_BASIC_RSP_RESULT 0x000110E8 +#define APR_RSP_ACCEPTED 0x000100BE + +/* Domain IDs */ +#define APR_DOMAIN_SIM 0x1 +#define APR_DOMAIN_PC 0x2 +#define APR_DOMAIN_MODEM 0x3 +#define APR_DOMAIN_ADSP 0x4 +#define APR_DOMAIN_APPS 0x5 +#define APR_DOMAIN_MAX 0x6 + +/* ADSP service IDs */ +#define APR_SVC_TEST_CLIENT 0x2 +#define APR_SVC_ADSP_CORE 0x3 +#define APR_SVC_AFE 0x4 +#define APR_SVC_VSM 0x5 +#define APR_SVC_VPM 0x6 +#define APR_SVC_ASM 0x7 +#define APR_SVC_ADM 0x8 +#define APR_SVC_ADSP_MVM 0x09 +#define APR_SVC_ADSP_CVS 0x0A +#define APR_SVC_ADSP_CVP 0x0B +#define APR_SVC_MAX 0x0C + +/* Modem Service IDs */ +#define APR_SVC_MVS 0x3 +#define APR_SVC_MVM 0x4 +#define APR_SVC_CVS 0x5 +#define APR_SVC_CVP 0x6 +#define APR_SVC_SRD 0x7 + +/* APR Port IDs */ +#define APR_MAX_PORTS 0x40 + +#define APR_NAME_MAX 0x40 + +#define RESET_EVENTS 0xFFFFFFFF + +#define LPASS_RESTART_EVENT 0x1000 +#define LPASS_RESTART_READY 0x1001 + +struct apr_client_data { + uint16_t reset_event; + uint16_t reset_proc; + uint16_t payload_size; + uint16_t hdr_len; + uint16_t msg_type; + uint16_t src; + uint16_t dest_svc; + uint16_t src_port; + uint16_t dest_port; + uint32_t token; + uint32_t opcode; + void *payload; +}; + +typedef int32_t (*apr_fn)(struct apr_client_data *data, void *priv); + +struct apr_svc { + uint16_t id; + uint16_t dest_id; + uint16_t client_id; + uint8_t rvd; + uint8_t port_cnt; + uint8_t svc_cnt; + uint8_t need_reset; + apr_fn port_fn[APR_MAX_PORTS]; + void *port_priv[APR_MAX_PORTS]; + apr_fn fn; + void *priv; + struct mutex m_lock; + spinlock_t w_lock; +}; + +struct apr_client { + uint8_t id; + uint8_t svc_cnt; + uint8_t rvd; + struct mutex m_lock; + struct apr_svc_ch_dev *handle; + struct apr_svc svc[APR_SVC_MAX]; +}; + +#define ADSP_GET_VERSION 0x00011152 +#define ADSP_GET_VERSION_RSP 0x00011153 + +struct adsp_get_version { + uint32_t build_id; + uint32_t svc_cnt; +}; + +struct adsp_service_info { + uint32_t svc_id; + uint32_t svc_ver; +}; + +#define ADSP_CMD_SET_POWER_COLLAPSE_STATE 0x0001115C +struct adsp_power_collapse { + struct apr_hdr hdr; + uint32_t power_collapse; +}; + +struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, + uint32_t src_port, void *priv); +inline int apr_fill_hdr(void *handle, uint32_t *buf, uint16_t src_port, + uint16_t msg_type, uint16_t dest_port, + uint32_t token, uint32_t opcode, uint16_t len); + +int apr_send_pkt(void *handle, uint32_t *buf); +int apr_deregister(void *handle); +void change_q6_state(int state); +void q6audio_dsp_not_responding(void); +uint32_t 
core_get_adsp_version(void); +void *core_open(void); +int32_t core_close(void); +void apr_reset(void *handle); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h b/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h new file mode 100644 index 00000000000..a6657cffbd5 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/apr_audio.h @@ -0,0 +1,1012 @@ +/* + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#ifndef _APR_AUDIO_H_ +#define _APR_AUDIO_H_ + +/* ASM opcodes without APR payloads*/ +#include + +/* + * Audio Front End (AFE) + */ + +/* Port ID. Update afe_get_port_index when a new port is added here. */ +#define PRIMARY_I2S_RX 0 /* index = 0 */ +#define PRIMARY_I2S_TX 1 /* index = 1 */ +#define PCM_RX 2 /* index = 2 */ +#define PCM_TX 3 /* index = 3 */ +#define SECONDARY_I2S_RX 4 /* index = 4 */ +#define SECONDARY_I2S_TX 5 /* index = 5 */ +#define MI2S_RX 6 /* index = 6 */ +#define MI2S_TX 7 /* index = 7 */ +#define HDMI_RX 8 /* index = 8 */ +#define RSVD_2 9 /* index = 9 */ +#define RSVD_3 10 /* index = 10 */ +#define DIGI_MIC_TX 11 /* index = 11 */ +#define VOICE_RECORD_RX 0x8003 /* index = 12 */ +#define VOICE_RECORD_TX 0x8004 /* index = 13 */ +#define VOICE_PLAYBACK_TX 0x8005 /* index = 14 */ +#define AFE_PORT_INVALID 0xFFFF + +#define AFE_PORT_CMD_START 0x000100ca +struct afe_port_start_command { + struct apr_hdr hdr; + u16 port_id; + u16 gain; /* Q13 */ + u32 sample_rate; /* 8 , 16, 48khz */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_STOP 0x000100cb +struct afe_port_stop_command { + struct apr_hdr hdr; + u16 port_id; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_APPLY_GAIN 0x000100cc +struct afe_port_gain_command { + struct apr_hdr hdr; + u16 port_id; + u16 gain;/* Q13 */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_SIDETONE_CTL 0x000100cd +struct afe_port_sidetone_command { + struct apr_hdr hdr; + u16 rx_port_id; /* Primary i2s tx = 1 */ + /* PCM tx = 3 */ + /* Secondary i2s tx = 5 */ + /* Mi2s tx = 7 */ + /* Digital mic tx = 11 */ + u16 tx_port_id; /* Primary i2s rx = 0 */ + /* PCM rx = 2 */ + /* Secondary i2s rx = 4 */ + /* Mi2S rx = 6 */ + /* HDMI rx = 8 */ + u16 gain; /* Q13 */ + u16 enable; /* 1 = enable, 0 = disable */ +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_LOOPBACK 0x000100ce +struct afe_loopback_command { + struct apr_hdr hdr; + u16 tx_port_id; /* Primary i2s rx = 0 */ + /* PCM rx = 2 */ + /* Secondary i2s rx = 4 */ + /* Mi2S rx = 6 */ + /* HDMI rx = 8 */ + u16 rx_port_id; /* Primary i2s tx = 1 */ + /* PCM tx = 3 */ + /* Secondary i2s tx = 5 */ + /* Mi2s tx = 7 */ + /* Digital mic tx = 11 */ + u16 mode; /* Default -1, DSP will conver + the tx to rx format */ + u16 enable; /* 1 = enable, 0 = disable */ +} __attribute__ ((packed)); + +#define AFE_PSEUDOPORT_CMD_START 0x000100cf 
+struct afe_pseudoport_start_command { + struct apr_hdr hdr; + u16 port_id; /* Pseudo Port 1 = 0x8000 */ + /* Pseudo Port 2 = 0x8001 */ + /* Pseudo Port 3 = 0x8002 */ + u16 timing; /* FTRT = 0, AVTimer = 1 */ +} __attribute__ ((packed)); + +#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0 +struct afe_pseudoport_stop_command { + struct apr_hdr hdr; + u16 port_id; /* Pseudo Port 1 = 0x8000 */ + /* Pseudo Port 2 = 0x8001 */ + /* Pseudo Port 3 = 0x8002 */ + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_CMD_GET_ACTIVE_PORTS 0x000100d1 + + +#define AFE_CMD_GET_ACTIVE_HANDLES_FOR_PORT 0x000100d2 +struct afe_get_active_handles_command { + struct apr_hdr hdr; + u16 port_id; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PCM_CFG_MODE_PCM 0x0 +#define AFE_PCM_CFG_MODE_AUX 0x1 +#define AFE_PCM_CFG_SYNC_EXT 0x0 +#define AFE_PCM_CFG_SYNC_INT 0x1 +#define AFE_PCM_CFG_FRM_8BPF 0x0 +#define AFE_PCM_CFG_FRM_16BPF 0x1 +#define AFE_PCM_CFG_FRM_32BPF 0x2 +#define AFE_PCM_CFG_FRM_64BPF 0x3 +#define AFE_PCM_CFG_FRM_128BPF 0x4 +#define AFE_PCM_CFG_FRM_256BPF 0x5 +#define AFE_PCM_CFG_QUANT_ALAW_NOPAD 0x0 +#define AFE_PCM_CFG_QUANT_MULAW_NOPAD 0x1 +#define AFE_PCM_CFG_QUANT_LINEAR_NOPAD 0x2 +#define AFE_PCM_CFG_QUANT_ALAW_PAD 0x3 +#define AFE_PCM_CFG_QUANT_MULAW_PAD 0x4 +#define AFE_PCM_CFG_QUANT_LINEAR_PAD 0x5 +#define AFE_PCM_CFG_CDATAOE_MASTER 0x0 +#define AFE_PCM_CFG_CDATAOE_SHARE 0x1 + +struct afe_port_pcm_cfg { + u16 mode; /* PCM (short sync) = 0, AUXPCM (long sync) = 1 */ + u16 sync; /* external = 0, internal = 1 */ + u16 frame; /* 8 bpf = 0 */ + /* 16 bpf = 1 */ + /* 32 bpf = 2 */ + /* 64 bpf = 3 */ + /* 128 bpf = 4 */ + /* 256 bpf = 5 */ + u16 quant; + u16 slot; /* Slot for PCM stream, 0 - 31 */ + u16 data; /* 0, PCM block is the only master */ + /* 1, PCM block shares the data out signal with */ + /* another master */ + u16 reserved; +} __attribute__ ((packed)); + +enum { + AFE_I2S_SD0 = 1, + AFE_I2S_SD1, + AFE_I2S_SD2, + AFE_I2S_SD3, + AFE_I2S_QUAD01, + AFE_I2S_QUAD23, + AFE_I2S_6CHS, + AFE_I2S_8CHS, +}; + +#define AFE_MI2S_MONO 0 +#define AFE_MI2S_STEREO 3 +#define AFE_MI2S_4CHANNELS 4 +#define AFE_MI2S_6CHANNELS 6 +#define AFE_MI2S_8CHANNELS 8 + +struct afe_port_mi2s_cfg { + u16 bitwidth; /* 16,24,32 */ + u16 line; /* Called ChannelMode in documentation */ + /* i2s_sd0 = 1 */ + /* i2s_sd1 = 2 */ + /* i2s_sd2 = 3 */ + /* i2s_sd3 = 4 */ + /* i2s_quad01 = 5 */ + /* i2s_quad23 = 6 */ + /* i2s_6chs = 7 */ + /* i2s_8chs = 8 */ + u16 channel; /* Called MonoStereo in documentation */ + /* i2s mono = 0 */ + /* i2s mono right = 1 */ + /* i2s mono left = 2 */ + /* i2s stereo = 3 */ + u16 ws; /* 0, word select signal from external source */ + /* 1, word select signal from internal source */ + u16 reserved; +} __attribute__ ((packed)); + +struct afe_port_hdmi_cfg { + u16 bitwidth; /* 16,24,32 */ + u16 channel_mode; /* HDMI Stereo = 0 */ + /* HDMI_3Point1 (4-ch) = 1 */ + /* HDMI_5Point1 (6-ch) = 2 */ + /* HDMI_6Point1 (8-ch) = 3 */ + u16 data_type; /* HDMI_Linear = 0 */ + /* HDMI_non_Linear = 1 */ +} __attribute__ ((packed)); + +#define AFE_PORT_AUDIO_IF_CONFIG 0x000100d3 + +union afe_port_config { + struct afe_port_pcm_cfg pcm; + struct afe_port_mi2s_cfg mi2s; + struct afe_port_hdmi_cfg hdmi; +} __attribute__((packed)); + +struct afe_audioif_config_command { + struct apr_hdr hdr; + u16 port_id; + union afe_port_config port; +} __attribute__ ((packed)); + +#define AFE_TEST_CODEC_LOOPBACK_CTL 0x000100d5 +struct afe_codec_loopback_command { + u16 port_inf; /* Primary i2s = 0 */ + /* PCM = 2 */
+ /* Secondary i2s = 4 */ + /* Mi2s = 6 */ + u16 enable; /* 0, disable. 1, enable */ +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_SIDETONE_GAIN 0x00010300 +struct afe_param_sidetone_gain { + u16 gain; + u16 reserved; +} __attribute__ ((packed)); + +#define AFE_PARAM_ID_SAMPLING_RATE 0x00010301 +struct afe_param_sampling_rate { + u32 sampling_rate; +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_CHANNELS 0x00010302 +struct afe_param_channels { + u16 channels; + u16 reserved; +} __attribute__ ((packed)); + + +#define AFE_PARAM_ID_LOOPBACK_GAIN 0x00010303 +struct afe_param_loopback_gain { + u16 gain; + u16 reserved; +} __attribute__ ((packed)); + + +#define AFE_MODULE_ID_PORT_INFO 0x00010200 +struct afe_param_payload { + u32 module_id; + u32 param_id; + u16 param_size; + u16 reserved; + union { + struct afe_param_sidetone_gain sidetone_gain; + struct afe_param_sampling_rate sampling_rate; + struct afe_param_channels channels; + struct afe_param_loopback_gain loopback_gain; + } __attribute__((packed)) param; +} __attribute__ ((packed)); + +#define AFE_PORT_CMD_SET_PARAM 0x000100dc + +struct afe_port_cmd_set_param { + struct apr_hdr hdr; + u16 port_id; + u16 payload_size; + u32 payload_address; + struct afe_param_payload payload; +} __attribute__ ((packed)); + + +#define AFE_EVENT_GET_ACTIVE_PORTS 0x00010100 +struct afe_get_active_ports_rsp { + u16 num_ports; + u16 port_id; +} __attribute__ ((packed)); + + +#define AFE_EVENT_GET_ACTIVE_HANDLES 0x00010102 +struct afe_get_active_handles_rsp { + u16 port_id; + u16 num_handles; + u16 mode; /* 0, voice rx */ + /* 1, voice tx */ + /* 2, audio rx */ + /* 3, audio tx */ + u16 handle; +} __attribute__ ((packed)); + +#define ADM_MAX_COPPS 5 + +#define ADM_SERVICE_CMD_GET_COPP_HANDLES 0x00010300 +struct adm_get_copp_handles_command { + struct apr_hdr hdr; +} __attribute__ ((packed)); + +#define ADM_CMD_MATRIX_MAP_ROUTINGS 0x00010301 +struct adm_routings_session { + u16 id; + u16 num_copps; + u16 copp_id[ADM_MAX_COPPS+1]; /*Padding if numCopps is odd */ +} __packed; + +struct adm_routings_command { + struct apr_hdr hdr; + u32 path; /* 0 = Rx, 1 Tx */ + u32 num_sessions; + struct adm_routings_session session[8]; +} __attribute__ ((packed)); + + +#define ADM_CMD_MATRIX_RAMP_GAINS 0x00010302 +struct adm_ramp_gain { + struct apr_hdr hdr; + u16 session_id; + u16 copp_id; + u16 initial_gain; + u16 gain_increment; + u16 ramp_duration; + u16 reserved; +} __attribute__ ((packed)); + +struct adm_ramp_gains_command { + struct apr_hdr hdr; + u32 id; + u32 num_gains; + struct adm_ramp_gain gains[ADM_MAX_COPPS]; +} __attribute__ ((packed)); + + +#define ADM_CMD_COPP_OPEN 0x00010304 +struct adm_copp_open_command { + struct apr_hdr hdr; + u16 flags; + u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */ + u16 endpoint_id1; + u16 endpoint_id2; + u32 topology_id; + u16 channel_config; + u16 reserved; + u32 rate; +} __attribute__ ((packed)); + +#define ADM_CMD_COPP_CLOSE 0x00010305 + +#define ADM_CMD_MEMORY_MAP 0x00010C30 +struct adm_cmd_memory_map{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u16 mempool_id; + u16 reserved; +} __attribute__((packed)); + +#define ADM_CMD_MEMORY_UNMAP 0x00010C31 +struct adm_cmd_memory_unmap{ + struct apr_hdr hdr; + u32 buf_add; +} __attribute__((packed)); + +#define ADM_CMD_MEMORY_MAP_REGIONS 0x00010C47 +struct adm_memory_map_regions{ + u32 phys; + u32 buf_size; +} __attribute__((packed)); + +struct adm_cmd_memory_map_regions{ + struct apr_hdr hdr; + u16 mempool_id; + u16 nregions; +} __attribute__((packed)); + +#define 
ADM_CMD_MEMORY_UNMAP_REGIONS 0x00010C48 +struct adm_memory_unmap_regions{ + u32 phys; +} __attribute__((packed)); + +struct adm_cmd_memory_unmap_regions{ + struct apr_hdr hdr; + u16 nregions; + u16 reserved; +} __attribute__((packed)); + +#define DEFAULT_COPP_TOPOLOGY 0x00010be3 +#define DEFAULT_POPP_TOPOLOGY 0x00010be4 +#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71 +#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72 +#define HTC_STEREO_RECORD_TOPOLOGY 0x10000000 +#define HTC_COPP_TOPOLOGY 0x10000001 + +#define ASM_MAX_EQ_BANDS 12 + +struct asm_eq_band { + u32 band_idx; /* The band index, 0 .. 11 */ + u32 filter_type; /* Filter band type */ + u32 center_freq_hz; /* Filter band center frequency */ + u32 filter_gain; /* Filter band initial gain (dB) */ + /* Range is +12 dB to -12 dB with 1dB increments. */ + u32 q_factor; +} __attribute__ ((packed)); + +struct asm_equalizer_params { + u32 enable; + u32 num_bands; + struct asm_eq_band eq_bands[ASM_MAX_EQ_BANDS]; +} __attribute__ ((packed)); + +struct asm_master_gain_params { + u16 master_gain; + u16 padding; +} __attribute__ ((packed)); + +struct asm_lrchannel_gain_params { + u16 left_gain; + u16 right_gain; +} __attribute__ ((packed)); + +struct asm_mute_params { + u32 muteflag; +} __attribute__ ((packed)); + +struct asm_softvolume_params { + u32 period; + u32 step; + u32 rampingcurve; +} __attribute__ ((packed)); + +struct asm_softpause_params { + u32 enable; + u32 period; + u32 step; + u32 rampingcurve; +} __packed; + +struct asm_pp_param_data_hdr { + u32 module_id; + u32 param_id; + u16 param_size; + u16 reserved; +} __attribute__ ((packed)); + +struct asm_pp_params_command { + struct apr_hdr hdr; + u32 *payload; + u32 payload_size; + struct asm_pp_param_data_hdr params; +} __attribute__ ((packed)); + +#define EQUALIZER_MODULE_ID 0x00010c27 +#define EQUALIZER_PARAM_ID 0x00010c28 + +#define VOLUME_CONTROL_MODULE_ID 0x00010bfe +#define MASTER_GAIN_PARAM_ID 0x00010bff +#define L_R_CHANNEL_GAIN_PARAM_ID 0x00010c00 +#define MUTE_CONFIG_PARAM_ID 0x00010c01 +#define SOFT_PAUSE_PARAM_ID 0x00010D6A + +#define IIR_FILTER_ENABLE_PARAM_ID 0x00010c03 +#define IIR_FILTER_PREGAIN_PARAM_ID 0x00010c04 +#define IIR_FILTER_CONFIG_PARAM_ID 0x00010c05 + +#define MBADRC_MODULE_ID 0x00010c06 +#define MBADRC_ENABLE_PARAM_ID 0x00010c07 +#define MBADRC_CONFIG_PARAM_ID 0x00010c08 + + +#define ADM_CMD_SET_PARAMS 0x00010306 +#define ADM_CMD_GET_PARAMS 0x0001030B +#define ADM_CMDRSP_GET_PARAMS 0x0001030C +struct adm_set_params_command { + struct apr_hdr hdr; + u32 payload; + u32 payload_size; +} __attribute__ ((packed)); + + +#define ADM_CMD_TAP_COPP_PCM 0x00010307 +struct adm_tap_copp_pcm_command { + struct apr_hdr hdr; +} __attribute__ ((packed)); + + +/* QDSP6 to Client messages +*/ +#define ADM_SERVICE_CMDRSP_GET_COPP_HANDLES 0x00010308 +struct adm_get_copp_handles_respond { + struct apr_hdr hdr; + u32 handles; + u32 copp_id; +} __attribute__ ((packed)); + +#define ADM_CMDRSP_COPP_OPEN 0x0001030A +struct adm_copp_open_respond { + u32 status; + u16 copp_id; + u16 reserved; +} __attribute__ ((packed)); + +#define ASM_STREAM_PRIORITY_NORMAL 0 +#define ASM_STREAM_PRIORITY_LOW 1 +#define ASM_STREAM_PRIORITY_HIGH 2 +#define ASM_STREAM_PRIORITY_RESERVED 3 + +#define ASM_END_POINT_DEVICE_MATRIX 0 +#define ASM_END_POINT_STREAM 1 + +#define AAC_ENC_MODE_AAC_LC 0x02 +#define AAC_ENC_MODE_AAC_P 0x05 +#define AAC_ENC_MODE_EAAC_P 0x1D + +#define ASM_STREAM_CMD_CLOSE 0x00010BCD +#define ASM_STREAM_CMD_FLUSH 0x00010BCE +#define ASM_STREAM_CMD_SET_PP_PARAMS 0x00010BCF +#define 
ASM_STREAM_CMD_GET_PP_PARAMS 0x00010BD0 +#define ASM_STREAM_CMDRSP_GET_PP_PARAMS 0x00010BD1 +#define ASM_SESSION_CMD_PAUSE 0x00010BD3 +#define ASM_SESSION_CMD_GET_SESSION_TIME 0x00010BD4 +#define ASM_DATA_CMD_EOS 0x00010BDB +#define ASM_DATA_EVENT_EOS 0x00010BDD + +#define ASM_SERVICE_CMD_GET_STREAM_HANDLES 0x00010C0B +#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09 + +#define ASM_SESSION_EVENT_RX_UNDERFLOW 0x00010C17 +#define ASM_SESSION_EVENT_TX_OVERFLOW 0x00010C18 +#define ASM_SERVICE_CMD_GET_WALLCLOCK_TIME 0x00010C19 +#define ASM_DATA_CMDRSP_EOS 0x00010C1C + +/* ASM Data structures */ + +/* common declarations */ +struct asm_pcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; + u16 is_signed; + u16 interleaved; +}; + +struct asm_adpcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; + u32 block_size; +}; + +struct asm_yadpcm_cfg { + u16 ch_cfg; + u16 bits_per_sample; + u32 sample_rate; +}; + +struct asm_midi_cfg { + u32 nMode; +}; + +struct asm_wma_cfg { + u16 format_tag; + u16 ch_cfg; + u32 sample_rate; + u32 avg_bytes_per_sec; + u16 block_align; + u16 valid_bits_per_sample; + u32 ch_mask; + u16 encode_opt; + u16 adv_encode_opt; + u32 adv_encode_opt2; + u32 drc_peak_ref; + u32 drc_peak_target; + u32 drc_ave_ref; + u32 drc_ave_target; +}; + +struct asm_wmapro_cfg { + u16 format_tag; + u16 ch_cfg; + u32 sample_rate; + u32 avg_bytes_per_sec; + u16 block_align; + u16 valid_bits_per_sample; + u32 ch_mask; + u16 encode_opt; + u16 adv_encode_opt; + u32 adv_encode_opt2; + u32 drc_peak_ref; + u32 drc_peak_target; + u32 drc_ave_ref; + u32 drc_ave_target; +}; + +struct asm_aac_cfg { + u16 format; + u16 aot; + u16 ep_config; + u16 section_data_resilience; + u16 scalefactor_data_resilience; + u16 spectral_data_resilience; + u16 ch_cfg; + u16 reserved; + u32 sample_rate; +}; + +struct asm_flac_cfg { + u16 stream_info_present; + u16 min_blk_size; + u16 max_blk_size; + u16 ch_cfg; + u16 sample_size; + u16 sample_rate; + u16 md5_sum; + u32 ext_sample_rate; + u32 min_frame_size; + u32 max_frame_size; +}; + +struct asm_vorbis_cfg { + u32 ch_cfg; + u32 bit_rate; + u32 min_bit_rate; + u32 max_bit_rate; + u16 bit_depth_pcm_sample; + u16 bit_stream_format; +}; + +struct asm_aac_read_cfg { + u32 bitrate; + u32 enc_mode; + u16 format; + u16 ch_cfg; + u32 sample_rate; +}; + +struct asm_amrnb_read_cfg { + u16 mode; + u16 dtx_mode; +}; + +struct asm_evrc_read_cfg { + u16 max_rate; + u16 min_rate; + u16 rate_modulation_cmd; + u16 reserved; +}; + +struct asm_qcelp13_read_cfg { + u16 max_rate; + u16 min_rate; + u16 reduced_rate_level; + u16 rate_modulation_cmd; +}; + +struct asm_sbc_read_cfg { + u32 subband; + u32 block_len; + u32 ch_mode; + u32 alloc_method; + u32 bit_rate; + u32 sample_rate; +}; + +struct asm_sbc_bitrate { + u32 bitrate; +}; + +struct asm_immed_decode { + u32 mode; +}; + +struct asm_sbr_ps { + u32 enable; +}; + +struct asm_encode_cfg_blk { + u32 frames_per_buf; + u32 format_id; + u32 cfg_size; + union { + struct asm_pcm_cfg pcm; + struct asm_aac_read_cfg aac; + struct asm_amrnb_read_cfg amrnb; + struct asm_evrc_read_cfg evrc; + struct asm_qcelp13_read_cfg qcelp13; + struct asm_sbc_read_cfg sbc; + } __attribute__((packed)) cfg; +}; + +struct asm_frame_meta_info { + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +}; + +/* Stream level commands */ +#define ASM_STREAM_CMD_OPEN_READ 0x00010BCB +struct asm_stream_cmd_open_read { + struct apr_hdr hdr; + u32 uMode; + u32 src_endpoint; + u32 pre_proc_top; + u32 
format; +} __attribute__((packed)); + +/* Supported formats */ +#define LINEAR_PCM 0x00010BE5 +#define DTMF 0x00010BE6 +#define ADPCM 0x00010BE7 +#define YADPCM 0x00010BE8 +#define MP3 0x00010BE9 +#define MPEG4_AAC 0x00010BEA +#define AMRNB_FS 0x00010BEB +#define V13K_FS 0x00010BED +#define EVRC_FS 0x00010BEE +#define EVRCB_FS 0x00010BEF +#define EVRCWB_FS 0x00010BF0 +#define MIDI 0x00010BF1 +#define SBC 0x00010BF2 +#define WMA_V10PRO 0x00010BF3 +#define WMA_V9 0x00010BF4 +#define AMR_WB_PLUS 0x00010BF5 +#define AC3_DECODER 0x00010BF6 +#define G711_ALAW_FS 0x00010BF7 +#define G711_MLAW_FS 0x00010BF8 +#define G711_PCM_FS 0x00010BF9 + +#define ASM_ENCDEC_SBCRATE 0x00010C13 +#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14 +#define ASM_ENCDEC_CFG_BLK 0x00010C2C + +#define ASM_ENCDEC_SBCRATE 0x00010C13 +#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14 +#define ASM_ENCDEC_CFG_BLK 0x00010C2C + +#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA +struct asm_stream_cmd_open_write { + struct apr_hdr hdr; + u32 uMode; + u16 sink_endpoint; + u16 stream_handle; + u32 post_proc_top; + u32 format; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_OPEN_READWRITE 0x00010BCC + +struct asm_stream_cmd_open_read_write { + struct apr_hdr hdr; + u32 uMode; + u32 post_proc_top; + u32 write_format; + u32 read_format; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10 +#define ASM_STREAM_CMD_GET_ENCDEC_PARAM 0x00010C11 +#define ASM_ENCDEC_CFG_BLK_ID 0x00010C2C +#define ASM_ENABLE_SBR_PS 0x00010C63 +struct asm_stream_cmd_encdec_cfg_blk{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_encode_cfg_blk enc_blk; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_sbc_bitrate{ + struct apr_hdr hdr; + u32 param_id; + struct asm_sbc_bitrate sbc_bitrate; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_immed_decode{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_immed_decode dec; +} __attribute__((packed)); + +struct asm_stream_cmd_encdec_sbr{ + struct apr_hdr hdr; + u32 param_id; + u32 param_size; + struct asm_sbr_ps sbr_ps; +} __attribute__((packed)); + +#define ASM_STREAM _CMD_ADJUST_SAMPLES 0x00010C0A +struct asm_stream_cmd_adjust_samples{ + struct apr_hdr hdr; + u16 nsamples; + u16 reserved; +} __attribute__((packed)); + +#define ASM_STREAM_CMD_TAP_POPP_PCM 0x00010BF9 +struct asm_stream_cmd_tap_popp_pcm{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; + u32 module_id; +} __attribute__((packed)); + +/* Session Level commands */ +#define ASM_SESSION_CMD_MEMORY_MAP 0x00010C32 +struct asm_stream_cmd_memory_map{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u16 mempool_id; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_UNMAP 0x00010C33 +struct asm_stream_cmd_memory_unmap{ + struct apr_hdr hdr; + u32 buf_add; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS 0x00010C45 +struct asm_memory_map_regions{ + u32 phys; + u32 buf_size; +} __attribute__((packed)); + +struct asm_stream_cmd_memory_map_regions{ + struct apr_hdr hdr; + u16 mempool_id; + u16 nregions; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS 0x00010C46 +struct asm_memory_unmap_regions{ + u32 phys; +} __attribute__((packed)); + +struct asm_stream_cmd_memory_unmap_regions{ + struct apr_hdr hdr; + u16 nregions; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_RUN 0x00010BD2 +struct asm_stream_cmd_run{ + struct apr_hdr hdr; + u32 flags; + u32 msw_ts; + u32 lsw_ts; +} 
__attribute__((packed)); + +/* Session level events */ +#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5 +struct asm_stream_cmd_reg_rx_underflow_event{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; +} __attribute__((packed)); + +#define ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS 0x00010BD6 +struct asm_stream_cmd_reg_tx_overflow_event{ + struct apr_hdr hdr; + u16 enable; + u16 reserved; +} __attribute__((packed)); + +/* Data Path commands */ +#define ASM_DATA_CMD_WRITE 0x00010BD9 +struct asm_stream_cmd_write{ + struct apr_hdr hdr; + u32 buf_add; + u32 avail_bytes; + u32 uid; + u32 msw_ts; + u32 lsw_ts; + u32 uflags; +} __attribute__((packed)); + +#define ASM_DATA_CMD_READ 0x00010BDA +struct asm_stream_cmd_read{ + struct apr_hdr hdr; + u32 buf_add; + u32 buf_size; + u32 uid; +} __attribute__((packed)); + +#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC +#define ASM_DATA_EVENT_MEDIA_FORMAT_UPDATE 0x00010BDE +struct asm_stream_media_format_update{ + struct apr_hdr hdr; + u32 format; + u32 cfg_size; + union { + struct asm_pcm_cfg pcm_cfg; + struct asm_adpcm_cfg adpcm_cfg; + struct asm_yadpcm_cfg yadpcm_cfg; + struct asm_midi_cfg midi_cfg; + struct asm_wma_cfg wma_cfg; + struct asm_wmapro_cfg wmapro_cfg; + struct asm_aac_cfg aac_cfg; + struct asm_flac_cfg flac_cfg; + struct asm_vorbis_cfg vorbis_cfg; + } __attribute__((packed)) write_cfg; +} __attribute__((packed)); + + +/* Command Responses */ +#define ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM 0x00010C12 +struct asm_stream_cmdrsp_get_readwrite_param{ + struct apr_hdr hdr; + u32 status; + u32 param_id; + u16 param_size; + u16 padding; + union { + struct asm_sbc_bitrate sbc_bitrate; + struct asm_immed_decode aac_dec; + } __attribute__((packed)) read_write_cfg; +} __attribute__((packed)); + + +#define ASM_SESSION_CMDRSP_GET_SESSION_TIME 0x00010BD8 +struct asm_stream_cmdrsp_get_session_time{ + struct apr_hdr hdr; + u32 status; + u32 msw_ts; + u32 lsw_ts; +} __attribute__((packed)); + +#define ASM_DATA_EVENT_WRITE_DONE 0x00010BDF +struct asm_data_event_write_done{ + u32 buf_add; + u32 status; +} __attribute__((packed)); + +#define ASM_DATA_EVENT_READ_DONE 0x00010BE0 +struct asm_data_event_read_done{ + u32 status; + u32 buffer_add; + u32 enc_frame_size; + u32 offset; + u32 msw_ts; + u32 lsw_ts; + u32 flags; + u32 num_frames; + u32 id; +} __attribute__((packed)); + + +/* service level events */ + +#define ASM_SERVICE_CMDRSP_GET_STREAM_HANDLES 0x00010C1B +struct asm_svc_cmdrsp_get_strm_handles{ + struct apr_hdr hdr; + u32 num_handles; + u32 stream_handles; +} __attribute__((packed)); + + +#define ASM_SERVICE_CMDRSP_GET_WALLCLOCK_TIME 0x00010C1A +struct asm_svc_cmdrsp_get_wallclock_time{ + struct apr_hdr hdr; + u32 status; + u32 msw_ts; + u32 lsw_ts; +} __attribute__((packed)); + +/* + * Error code +*/ +#define ADSP_EOK 0x00000000 /* Success / completed / no errors. */ +#define ADSP_EFAILED 0x00000001 /* General failure. */ +#define ADSP_EBADPARAM 0x00000002 /* Bad operation parameter(s). */ +#define ADSP_EUNSUPPORTED 0x00000003 /* Unsupported routine/operation. */ +#define ADSP_EVERSION 0x00000004 /* Unsupported version. */ +#define ADSP_EUNEXPECTED 0x00000005 /* Unexpected problem encountered. */ +#define ADSP_EPANIC 0x00000006 /* Unhandled problem occurred. */ +#define ADSP_ENORESOURCE 0x00000007 /* Unable to allocate resource(s). */ +#define ADSP_EHANDLE 0x00000008 /* Invalid handle. */ +#define ADSP_EALREADY 0x00000009 /* Operation is already processed. 
*/ +#define ADSP_ENOTREADY 0x0000000A /* Operation not ready to be processed*/ +#define ADSP_EPENDING 0x0000000B /* Operation is pending completion*/ +#define ADSP_EBUSY 0x0000000C /* Operation could not be accepted or + processed. */ +#define ADSP_EABORTED 0x0000000D /* Operation aborted due to an error. */ +#define ADSP_EPREEMPTED 0x0000000E /* Operation preempted by higher priority*/ +#define ADSP_ECONTINUE 0x0000000F /* Operation requests intervention + to complete. */ +#define ADSP_EIMMEDIATE 0x00000010 /* Operation requests immediate + intervention to complete. */ +#define ADSP_ENOTIMPL 0x00000011 /* Operation is not implemented. */ +#define ADSP_ENEEDMORE 0x00000012 /* Operation needs more data or resources*/ + +#endif /*_APR_AUDIO_H_*/ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h b/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h new file mode 100644 index 00000000000..d8ea0d45326 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/audio_dev_ctl.h @@ -0,0 +1,240 @@ +/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_H +#define __MACH_QDSP6_V2_SNDDEV_H +#include + +#define AUDIO_DEV_CTL_MAX_DEV 64 +#define DIR_TX 2 +#define DIR_RX 1 + +#define DEVICE_IGNORE 0xffffffff +#define SESSION_IGNORE 0x0UL + +/* 8 concurrent sessions with Q6 possible, session:0 + reserved in DSP */ +#define MAX_SESSIONS 0x09 + +/* This represents Maximum bit needed for representing sessions + per clients, MAX_BIT_PER_CLIENT >= MAX_SESSIONS */ +#define MAX_BIT_PER_CLIENT 16 + +#define VOICE_STATE_INVALID 0x0 +#define VOICE_STATE_INCALL 0x1 +#define VOICE_STATE_OFFCALL 0x2 +#define ONE_TO_MANY 1 +#define MANY_TO_ONE 2 + +struct msm_snddev_info { + const char *name; + u32 capability; + u32 copp_id; + u32 acdb_id; + u32 dev_volume; + struct msm_snddev_ops { + int (*open)(struct msm_snddev_info *); + int (*close)(struct msm_snddev_info *); + int (*set_freq)(struct msm_snddev_info *, u32); + int (*enable_sidetone)(struct msm_snddev_info *, u32, uint16_t); + int (*set_device_volume)(struct msm_snddev_info *, u32); + int (*enable_anc)(struct msm_snddev_info *, u32); + } dev_ops; + u8 opened; + void *private_data; + bool state; + u32 sample_rate; + u32 channel_mode; + u32 set_sample_rate; + u64 sessions; + int usage_count; + s32 max_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* [0] is for NB,[1] for WB */ + s32 min_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; +}; + +struct msm_volume { + int volume; /* Volume parameter, in % Scale */ + int pan; +}; + +extern struct msm_volume msm_vol_ctl; + +void msm_snddev_register(struct msm_snddev_info *); +void msm_snddev_unregister(struct msm_snddev_info *); +int msm_snddev_devcount(void); +int msm_snddev_query(int dev_id); +unsigned short msm_snddev_route_dec(int popp_id); +unsigned short msm_snddev_route_enc(int enc_id); + +int msm_snddev_set_dec(int popp_id, int copp_id, int set, + int rate, int channel_mode); +int msm_snddev_set_enc(int popp_id, int copp_id, int set, + int rate, int channel_mode); + +int msm_snddev_is_set(int popp_id, int copp_id); +int msm_get_voc_route(u32 *rx_id, u32 *tx_id); +int msm_set_voc_route(struct msm_snddev_info *dev_info, int stream_type, + int dev_id); +int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain); + +int msm_set_copp_id(int session_id, int copp_id); + +int msm_clear_copp_id(int session_id, int copp_id); + +int msm_clear_session_id(int session_id); + +int msm_reset_all_device(void); + +int msm_clear_all_session(void); + +struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id); + +void msm_release_voc_thread(void); + +int snddev_voice_set_volume(int vol, int path); + +int msm_get_call_state(void); + +struct auddev_evt_voc_devinfo { + u32 dev_type; /* Rx or Tx */ + u32 acdb_dev_id; /* acdb id of device */ + u32 dev_sample; /* Sample rate of device */ + s32 max_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb (milibel), + [0] is for NB, other for WB */ + s32 min_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb */ + u32 dev_id; /* registered device id */ + u32 dev_port_id; +}; + +struct auddev_evt_audcal_info { + u32 dev_id; + u32 acdb_id; + u32 sample_rate; + u32 dev_type; + u32 sessions; +}; + +union msm_vol_mute { + int vol; + bool mute; +}; + +struct auddev_evt_voc_mute_info { + u32 dev_type; + u32 acdb_dev_id; + union msm_vol_mute dev_vm_val; +}; + +struct auddev_evt_freq_info { + u32 dev_type; + u32 acdb_dev_id; + u32 sample_rate; +}; + +union auddev_evt_data { + struct auddev_evt_voc_devinfo voc_devinfo; + struct auddev_evt_voc_mute_info voc_vm_info; + struct auddev_evt_freq_info freq_info; + u32 routing_id; + s32 
session_vol; + s32 voice_state; + struct auddev_evt_audcal_info audcal_info; +}; + +struct message_header { + uint32_t id; + uint32_t data_len; +}; + +#define AUDDEV_EVT_DEV_CHG_VOICE 0x01 /* device change event */ +#define AUDDEV_EVT_DEV_RDY 0x02 /* device ready event */ +#define AUDDEV_EVT_DEV_RLS 0x04 /* device released event */ +#define AUDDEV_EVT_REL_PENDING 0x08 /* device release pending */ +#define AUDDEV_EVT_DEVICE_VOL_MUTE_CHG 0x10 /* device volume changed */ +#define AUDDEV_EVT_START_VOICE 0x20 /* voice call start */ +#define AUDDEV_EVT_END_VOICE 0x40 /* voice call end */ +#define AUDDEV_EVT_STREAM_VOL_CHG 0x80 /* device volume changed */ +#define AUDDEV_EVT_FREQ_CHG 0x100 /* Change in freq */ +#define AUDDEV_EVT_VOICE_STATE_CHG 0x200 /* Change in voice state */ + +#define AUDDEV_CLNT_VOC 0x1 /*Vocoder clients*/ +#define AUDDEV_CLNT_DEC 0x2 /*Decoder clients*/ +#define AUDDEV_CLNT_ENC 0x3 /* Encoder clients */ +#define AUDDEV_CLNT_AUDIOCAL 0x4 /* AudioCalibration client */ + +#define AUDIO_DEV_CTL_MAX_LISTNER 20 /* Max Listeners Supported */ + +struct msm_snd_evt_listner { + uint32_t evt_id; + uint32_t clnt_type; + uint32_t clnt_id; + void *private_data; + void (*auddev_evt_listener)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data); + struct msm_snd_evt_listner *cb_next; + struct msm_snd_evt_listner *cb_prev; +}; + +struct event_listner { + struct msm_snd_evt_listner *cb; + u32 num_listner; + int state; /* Call state */ /* TODO remove this if not req*/ +}; + +extern struct event_listner event; +int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id, + void (*listner)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data), + void *private_data); +int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id); +void mixer_post_event(u32 evt_id, u32 dev_id); +void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id); +int auddev_cfg_tx_copp_topology(int session_id, int cfg); +int msm_snddev_request_freq(int *freq, u32 session_id, + u32 capability, u32 clnt_type); +int msm_snddev_withdraw_freq(u32 session_id, + u32 capability, u32 clnt_type); +int msm_device_is_voice(int dev_id); +int msm_get_voc_freq(int *tx_freq, int *rx_freq); +int msm_snddev_get_enc_freq(int session_id); +int msm_set_voice_vol(int dir, s32 volume); +int msm_set_voice_mute(int dir, int mute); +int msm_get_voice_state(void); +int msm_enable_incall_recording(int popp_id, int rec_mode, int rate, + int channel_mode); +int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode); +void msm_set_voc_freq(int tx_freq, int rx_freq); + +struct dev_ctrl_ops { + int (*support_opendsp) (void); +}; + +void htc_8x60_register_dev_ctrl_ops(struct dev_ctrl_ops *ops); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h new file mode 100644 index 00000000000..0e977557f9d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6afe.h @@ -0,0 +1,67 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6AFE_H__ +#define __Q6AFE_H__ +#include + +#define MSM_AFE_MONO 0 +#define MSM_AFE_MONO_RIGHT 1 +#define MSM_AFE_MONO_LEFT 2 +#define MSM_AFE_STEREO 3 + +enum { + IDX_PRIMARY_I2S_RX = 0, + IDX_PRIMARY_I2S_TX = 1, + IDX_PCM_RX = 2, + IDX_PCM_TX = 3, + IDX_SECONDARY_I2S_RX = 4, + IDX_SECONDARY_I2S_TX = 5, + IDX_MI2S_RX = 6, + IDX_MI2S_TX = 7, + IDX_HDMI_RX = 8, + IDX_RSVD_2 = 9, + IDX_RSVD_3 = 10, + IDX_DIGI_MIC_TX = 11, + IDX_VOICE_RECORD_RX = 12, + IDX_VOICE_RECORD_TX = 13, + IDX_VOICE_PLAYBACK_TX = 14, + AFE_MAX_PORTS +}; + + +int afe_open(u16 port_id, union afe_port_config *afe_config, int rate); +int afe_close(int port_id); +int afe_loopback(u16 enable, u16 rx_port, u16 tx_port); +int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain); +int afe_loopback_gain(u16 port_id, u16 volume); +int afe_validate_port(u16 port_id); +int afe_get_port_index(u16 port_id); +int afe_start_pseudo_port(u16 port_id); +int afe_stop_pseudo_port(u16 port_id); + +#endif /* __Q6AFE_H__ */ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h new file mode 100644 index 00000000000..afb037726a3 --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6asm.h @@ -0,0 +1,264 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6_ASM_H__ +#define __Q6_ASM_H__ + +#include + +#define IN 0x000 +#define OUT 0x001 +#define CH_MODE_MONO 0x001 +#define CH_MODE_STEREO 0x002 + +#define FORMAT_LINEAR_PCM 0x0000 +#define FORMAT_DTMF 0x0001 +#define FORMAT_ADPCM 0x0002 +#define FORMAT_YADPCM 0x0003 +#define FORMAT_MP3 0x0004 +#define FORMAT_MPEG4_AAC 0x0005 +#define FORMAT_AMRNB 0x0006 +#define FORMAT_AMRWB 0x0007 +#define FORMAT_V13K 0x0008 +#define FORMAT_EVRC 0x0009 +#define FORMAT_EVRCB 0x000a +#define FORMAT_EVRCWB 0x000b +#define FORMAT_MIDI 0x000c +#define FORMAT_SBC 0x000d +#define FORMAT_WMA_V10PRO 0x000e +#define FORMAT_WMA_V9 0x000f +#define FORMAT_AMR_WB_PLUS 0x0010 + +#define ENCDEC_SBCBITRATE 0x0001 +#define ENCDEC_IMMEDIATE_DECODE 0x0002 +#define ENCDEC_CFG_BLK 0x0003 + +#define CMD_PAUSE 0x0001 +#define CMD_FLUSH 0x0002 +#define CMD_EOS 0x0003 +#define CMD_CLOSE 0x0004 + +/* bit 0:1 represents priority of stream */ +#define STREAM_PRIORITY_NORMAL 0x0000 +#define STREAM_PRIORITY_LOW 0x0001 +#define STREAM_PRIORITY_HIGH 0x0002 + +/* bit 4 represents META enable of encoded data buffer */ +#define BUFFER_META_ENABLE 0x0010 + +#define ASYNC_IO_MODE 0x0002 +#define SYNC_IO_MODE 0x0001 +#define NO_TIMESTAMP 0xFF00 +#define SET_TIMESTAMP 0x0000 + +#define SOFT_PAUSE_ENABLE 1 +#define SOFT_PAUSE_DISABLE 0 + +#define SESSION_MAX 0x08 + +typedef void (*app_cb)(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv); + +struct audio_buffer { + dma_addr_t phys; + void *data; + uint32_t used; + uint32_t size;/* size of buffer */ + uint32_t actual_size; /* actual number of bytes read by DSP */ +}; + +struct audio_aio_write_param { + unsigned long paddr; + uint32_t uid; + uint32_t len; + uint32_t msw_ts; + uint32_t lsw_ts; + uint32_t flags; +}; + +struct audio_aio_read_param { + unsigned long paddr; + uint32_t len; + uint32_t uid; +}; + +struct audio_port_data { + struct audio_buffer *buf; + uint32_t max_buf_cnt; + uint32_t dsp_buf; + uint32_t cpu_buf; + /* read or write locks */ + struct mutex lock; + spinlock_t dsp_lock; +}; + +struct audio_client { + int session; + /* idx:1 out port, 0: in port*/ + struct audio_port_data port[2]; + + struct apr_svc *apr; + struct mutex cmd_lock; + + atomic_t cmd_state; + atomic_t time_flag; + wait_queue_head_t cmd_wait; + wait_queue_head_t time_wait; + + app_cb cb; + void *priv; + uint32_t io_mode; + uint64_t time_stamp; +}; + +void q6asm_audio_client_free(struct audio_client *ac); + +struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv); + +int q6asm_audio_client_buf_alloc(unsigned int dir/* 1:Out,0:In */, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt); +int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir + /* 1:Out,0:In */, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt); + +int q6asm_audio_client_buf_free_contiguous(unsigned int dir, + struct audio_client *ac); + +int q6asm_open_read(struct audio_client *ac, uint32_t format); + +int q6asm_open_write(struct 
audio_client *ac, uint32_t format); + +int q6asm_open_read_write(struct audio_client *ac, + uint32_t rd_format, + uint32_t wr_format); + +int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags); + +int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags); + +int q6asm_async_write(struct audio_client *ac, + struct audio_aio_write_param *param); + +int q6asm_async_read(struct audio_client *ac, + struct audio_aio_read_param *param); + +int q6asm_read(struct audio_client *ac); +int q6asm_read_nolock(struct audio_client *ac); + +int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, + int dir, uint32_t bufsz, uint32_t bufcnt); + +int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, + int dir); + +int q6asm_run(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts); + +int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts); + +int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable); + +int q6asm_cmd(struct audio_client *ac, int cmd); + +int q6asm_cmd_nowait(struct audio_client *ac, int cmd); + +void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, + uint32_t *size, uint32_t *idx); + +int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac); + +/* File format specific configurations to be added below */ + +int q6asm_enc_cfg_blk_aac(struct audio_client *ac, + uint32_t frames_per_buf, + uint32_t sample_rate, uint32_t channels, + uint32_t bit_rate, + uint32_t mode, uint32_t format); + +int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels); + +int q6asm_enable_sbrps(struct audio_client *ac, + uint32_t sbr_ps); + +int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t reduced_rate_level, uint16_t rate_modulation_cmd); + +int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t rate_modulation_cmd); + +int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t band_mode, uint16_t dtx_enable); + +int q6asm_media_format_block_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels); + +int q6asm_media_format_block_aac(struct audio_client *ac, + struct asm_aac_cfg *cfg); + +int q6asm_media_format_block_wma(struct audio_client *ac, + void *cfg); + +int q6asm_media_format_block_wmapro(struct audio_client *ac, + void *cfg); + +/* PP specific */ +int q6asm_equalizer(struct audio_client *ac, void *eq); + +/* Send Volume Command */ +int q6asm_set_volume(struct audio_client *ac, int volume); + +/* Set SoftPause Params */ +int q6asm_set_softpause(struct audio_client *ac, + struct asm_softpause_params *param); + +/* Send left-right channel gain */ +int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain); + +/* Enable Mute/unmute flag */ +int q6asm_set_mute(struct audio_client *ac, int muteflag); + +uint64_t q6asm_get_session_time(struct audio_client *ac); + +/* Client can set the IO mode to either AIO/SIO mode */ +int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode); + +#ifdef CONFIG_MSM8X60_RTAC +/* Get Service ID for APR communication */ +int q6asm_get_apr_service_id(int session_id); +#endif + +#endif /* __Q6_ASM_H__ */ diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h b/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h new file mode 100644 
index 00000000000..25d4ca3a65c --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/q6voice.h @@ -0,0 +1,757 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __QDSP6VOICE_H__ +#define __QDSP6VOICE_H__ + +#include + +/* Device Event */ +#define DEV_CHANGE_READY 0x1 + +#define VOICE_CALL_START 0x1 +#define VOICE_CALL_END 0 + +#define VOICE_DEV_ENABLED 0x1 +#define VOICE_DEV_DISABLED 0 + +#define MAX_VOC_PKT_SIZE 322 + +#define SESSION_NAME_LEN 20 + +struct voice_header { + uint32_t id; + uint32_t data_len; +}; + +struct voice_init { + struct voice_header hdr; + void *cb_handle; +}; + + +/* Device information payload structure */ + +struct device_data { + uint32_t dev_acdb_id; + uint32_t volume; /* in percentage */ + uint32_t mute; + uint32_t sample; + uint32_t enabled; + uint32_t dev_id; + uint32_t dev_port_id; +}; + +enum { + VOC_INIT = 0, + VOC_RUN, + VOC_CHANGE, + VOC_RELEASE, +}; + +/* TO MVM commands */ +#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110FE +/* Create a new full control MVM session. */ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_ATTACH_STREAM 0x0001123C +/* Attach a stream to the MVM. */ + +#define VSS_IMVM_CMD_DETACH_STREAM 0x0001123D +/* Detach a stream from the MVM. */ + +#define VSS_IMVM_CMD_START_VOICE 0x00011190 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IMVM_CMD_STOP_VOICE 0x00011192 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_ATTACH_VOCPROC 0x000110F8 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_DETACH_VOCPROC 0x000110F9 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + + +#define VSS_ISTREAM_CMD_SET_TTY_MODE 0x00011196 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. 
*/ + +#define VSS_ICOMMON_CMD_SET_NETWORK 0x0001119C +/* Set the network type. */ + +#define VSS_ICOMMON_CMD_SET_VOICE_TIMING 0x000111E0 +/* Set the voice timing parameters. */ + +struct vss_imvm_cmd_create_control_session_t { + char name[SESSION_NAME_LEN]; /* + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __packed; + +struct vss_istream_cmd_set_tty_mode_t { + uint32_t mode; + /**< + * TTY mode. + * + * 0 : TTY disabled + * 1 : HCO + * 2 : VCO + * 3 : FULL + */ +} __attribute__((packed)); + +struct vss_istream_cmd_attach_vocproc_t { + uint16_t handle; + /**< Handle of vocproc being attached. */ +} __attribute__((packed)); + +struct vss_istream_cmd_detach_vocproc_t { + uint16_t handle; + /**< Handle of vocproc being detached. */ +} __attribute__((packed)); + +struct vss_imvm_cmd_attach_stream_t { + uint16_t handle; + /* The stream handle to attach. */ +} __attribute__((packed)); + +struct vss_imvm_cmd_detach_stream_t { + uint16_t handle; + /* The stream handle to detach. */ +} __attribute__((packed)); + +struct vss_icommon_cmd_set_network_t { + uint32_t network_id; + /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */ +} __attribute__((packed)); + +struct vss_icommon_cmd_set_voice_timing_t { + uint16_t mode; + /* + * The vocoder frame synchronization mode. + * + * 0 : No frame sync. + * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt). + */ + uint16_t enc_offset; + /* + * The offset in microseconds from the VFR to deliver a Tx vocoder + * packet. The offset should be less than 20000us. + */ + uint16_t dec_req_offset; + /* + * The offset in microseconds from the VFR to request for an Rx vocoder + * packet. The offset should be less than 20000us. + */ + uint16_t dec_offset; + /* + * The offset in microseconds from the VFR to indicate the deadline to + * receive an Rx vocoder packet. The offset should be less than 20000us. + * Rx vocoder packets received after this deadline are not guaranteed to + * be processed. + */ +} __attribute__((packed)); + +struct mvm_attach_vocproc_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle; +} __attribute__((packed)); + +struct mvm_detach_vocproc_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle; +} __attribute__((packed)); + +struct mvm_create_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_create_control_session_t mvm_session; +} __packed; + +struct mvm_set_tty_mode_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_tty_mode_t tty_mode; +} __attribute__((packed)); + +struct mvm_attach_stream_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_attach_stream_t attach_stream; +} __attribute__((packed)); + +struct mvm_detach_stream_cmd { + struct apr_hdr hdr; + struct vss_imvm_cmd_detach_stream_t detach_stream; +} __attribute__((packed)); + +struct mvm_set_network_cmd { + struct apr_hdr hdr; + struct vss_icommon_cmd_set_network_t network; +} __attribute__((packed)); + +struct mvm_set_voice_timing_cmd { + struct apr_hdr hdr; + struct vss_icommon_cmd_set_voice_timing_t timing; +} __attribute__((packed)); + +/* TO CVS commands */ +#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x00011140 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110F7 +/* Create a new full control stream session. 
*/ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C + +#define VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA 0x000110FB + +#define VSS_ISTREAM_CMD_SET_MUTE 0x00011022 + +#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE 0x00011186 +/* Set media type on the stream. */ + +#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER 0x00011015 +/* Event sent by the stream to its client to provide an encoded packet. */ + +#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER 0x00011017 +/* Event sent by the stream to its client requesting for a decoder packet. + * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event. + */ + +#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER 0x00011016 +/* Event sent by the client to the stream in response to a + * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet. + */ + +#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE 0x0001113E +/* Set AMR encoder rate. */ + +#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE 0x0001113F +/* Set AMR-WB encoder rate. */ + +#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE 0x00011019 +/* Set encoder minimum and maximum rate. */ + +#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE 0x0001101D +/* Set encoder DTX mode. */ + +#define VSS_ISTREAM_CMD_START_RECORD 0x00011236 +/* Start in-call conversation recording. */ + +#define VSS_ISTREAM_CMD_STOP_RECORD 0x00011237 +/* Stop in-call conversation recording. */ + +#define VSS_ISTREAM_CMD_START_PLAYBACK 0x00011238 +/* Start in-call music delivery on the Tx voice path. */ + +#define VSS_ISTREAM_CMD_STOP_PLAYBACK 0x00011239 +/* Stop the in-call music delivery on the Tx voice path. */ + +struct vss_istream_cmd_create_passive_control_session_t { + char name[SESSION_NAME_LEN]; + /**< + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_mute_t { + uint16_t direction; + /**< + * 0 : TX only + * 1 : RX only + * 2 : TX and Rx + */ + uint16_t mute_flag; + /**< + * Mute, un-mute. + * + * 0 : Silence disable + * 1 : Silence enable + * 2 : CNG enable. Applicable to TX only. If set on RX behavior + * will be the same as 1 + */ +} __attribute__((packed)); + +struct vss_istream_cmd_create_full_control_session_t { + uint16_t direction; + /* + * Stream direction. + * + * 0 : TX only + * 1 : RX only + * 2 : TX and RX + * 3 : TX and RX loopback + */ + uint32_t enc_media_type; + /* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t dec_media_type; + /* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t network_id; + /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */ + char name[SESSION_NAME_LEN]; + /* + * A variable-sized stream name. + * + * The stream name size is the payload size minus the size of the other + * fields. + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_media_type_t { + uint32_t rx_media_id; + /* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ + uint32_t tx_media_id; + /* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */ +} __attribute__((packed)); + +struct vss_istream_evt_send_enc_buffer_t { + uint32_t media_id; + /* Media ID of the packet. */ + uint8_t packet_data[MAX_VOC_PKT_SIZE]; + /* Packet data buffer. */ +} __attribute__((packed)); + +struct vss_istream_evt_send_dec_buffer_t { + uint32_t media_id; + /* Media ID of the packet. */ + uint8_t packet_data[MAX_VOC_PKT_SIZE]; + /* Packet data. */ +} __attribute__((packed)); + +struct vss_istream_cmd_voc_amr_set_enc_rate_t { + uint32_t mode; + /* Set the AMR encoder rate. 
+ * + * 0x00000000 : 4.75 kbps + * 0x00000001 : 5.15 kbps + * 0x00000002 : 5.90 kbps + * 0x00000003 : 6.70 kbps + * 0x00000004 : 7.40 kbps + * 0x00000005 : 7.95 kbps + * 0x00000006 : 10.2 kbps + * 0x00000007 : 12.2 kbps + */ +} __attribute__((packed)); + +struct vss_istream_cmd_voc_amrwb_set_enc_rate_t { + uint32_t mode; + /* Set the AMR-WB encoder rate. + * + * 0x00000000 : 6.60 kbps + * 0x00000001 : 8.85 kbps + * 0x00000002 : 12.65 kbps + * 0x00000003 : 14.25 kbps + * 0x00000004 : 15.85 kbps + * 0x00000005 : 18.25 kbps + * 0x00000006 : 19.85 kbps + * 0x00000007 : 23.05 kbps + * 0x00000008 : 23.85 kbps + */ +} __attribute__((packed)); + +struct vss_istream_cmd_cdma_set_enc_minmax_rate_t { + uint16_t min_rate; + /* Set the lower bound encoder rate. + * + * 0x0000 : Blank frame + * 0x0001 : Eighth rate + * 0x0002 : Quarter rate + * 0x0003 : Half rate + * 0x0004 : Full rate + */ + uint16_t max_rate; + /* Set the upper bound encoder rate. + * + * 0x0000 : Blank frame + * 0x0001 : Eighth rate + * 0x0002 : Quarter rate + * 0x0003 : Half rate + * 0x0004 : Full rate + */ +} __attribute__((packed)); + +struct vss_istream_cmd_set_enc_dtx_mode_t { + uint32_t enable; + /* Toggle DTX on or off. + * + * 0 : Disables DTX + * 1 : Enables DTX + */ +} __attribute__((packed)); + +#define VSS_TAP_POINT_NONE 0x00010F78 +/* Indicates no tapping for specified path. */ + +#define VSS_TAP_POINT_STREAM_END 0x00010F79 +/* Indicates that specified path should be tapped at the end of the stream. */ + +struct vss_istream_cmd_start_record_t { + uint32_t rx_tap_point; + /* Tap point to use on the Rx path. Supported values are: + * VSS_TAP_POINT_NONE : Do not record Rx path. + * VSS_TAP_POINT_STREAM_END : Rx tap point is at the end of the stream. + */ + uint32_t tx_tap_point; + /* Tap point to use on the Tx path. Supported values are: + * VSS_TAP_POINT_NONE : Do not record tx path. + * VSS_TAP_POINT_STREAM_END : Tx tap point is at the end of the stream. 
+ */ +} __attribute__((packed)); + +struct cvs_create_passive_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_create_passive_control_session_t cvs_session; +} __attribute__((packed)); + +struct cvs_create_full_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_create_full_control_session_t cvs_session; +} __attribute__((packed)); + +struct cvs_destroy_session_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvs_cache_calibration_data_cmd { + struct apr_hdr hdr; +} __attribute__ ((packed)); + +struct cvs_set_mute_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_mute_t cvs_set_mute; +} __attribute__((packed)); + +struct cvs_set_media_type_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_media_type_t media_type; +} __attribute__((packed)); + +struct cvs_send_dec_buf_cmd { + struct apr_hdr hdr; + struct vss_istream_evt_send_dec_buffer_t dec_buf; +} __attribute__((packed)); + +struct cvs_set_amr_enc_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate; +} __attribute__((packed)); + +struct cvs_set_amrwb_enc_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate; +} __attribute__((packed)); + +struct cvs_set_cdma_enc_minmax_rate_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate; +} __attribute__((packed)); + +struct cvs_set_enc_dtx_mode_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode; +} __attribute__((packed)); + +struct cvs_start_record_cmd { + struct apr_hdr hdr; + struct vss_istream_cmd_start_record_t rec_mode; +} __attribute__((packed)); + +/* TO CVP commands */ + +#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION 0x000100C3 +/**< Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C + +#define VSS_IVOCPROC_CMD_SET_DEVICE 0x000100C4 + +#define VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA 0x000110E3 + +#define VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE 0x000110E4 + +#define VSS_IVOCPROC_CMD_SET_VP3_DATA 0x000110EB + +#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX 0x000110EE + +#define VSS_IVOCPROC_CMD_ENABLE 0x000100C6 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IVOCPROC_CMD_DISABLE 0x000110E1 +/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */ + +#define VSS_IVOCPROC_TOPOLOGY_ID_NONE 0x00010F70 +#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS 0x00010F71 +#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE 0x00010F72 + +#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT 0x00010F77 + +/* Newtwork IDs */ +#define VSS_NETWORK_ID_DEFAULT 0x00010037 +#define VSS_NETWORK_ID_VOIP_NB 0x00011240 +#define VSS_NETWORK_ID_VOIP_WB 0x00011241 +#define VSS_NETWORK_ID_VOIP_WV 0x00011242 + +/* Media types */ +#define VSS_MEDIA_ID_EVRC_MODEM 0x00010FC2 +/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */ +#define VSS_MEDIA_ID_AMR_NB_MODEM 0x00010FC6 +/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */ +#define VSS_MEDIA_ID_AMR_WB_MODEM 0x00010FC7 +/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */ +#define VSS_MEDIA_ID_PCM_NB 0x00010FCB +/* Linear PCM (16-bit, little-endian). */ +#define VSS_MEDIA_ID_G711_ALAW 0x00010FCD +/* G.711 a-law (contains two 10ms vocoder frames). */ +#define VSS_MEDIA_ID_G711_MULAW 0x00010FCE +/* G.711 mu-law (contains two 10ms vocoder frames). */ +#define VSS_MEDIA_ID_G729 0x00010FD0 +/* G.729AB (contains two 10ms vocoder frames. 
*/ + +#define VOICE_CMD_SET_PARAM 0x00011006 +#define VOICE_CMD_GET_PARAM 0x00011007 +#define VOICE_EVT_GET_PARAM_ACK 0x00011008 + +struct vss_ivocproc_cmd_create_full_control_session_t { + uint16_t direction; + /* + * stream direction. + * 0 : TX only + * 1 : RX only + * 2 : TX and RX + */ + uint32_t tx_port_id; + /* + * TX device port ID which vocproc will connect to. If not supplying a + * port ID set to VSS_IVOCPROC_PORT_ID_NONE. + */ + uint32_t tx_topology_id; + /* + * Tx leg topology ID. If not supplying a topology ID set to + * VSS_IVOCPROC_TOPOLOGY_ID_NONE. + */ + uint32_t rx_port_id; + /* + * RX device port ID which vocproc will connect to. If not supplying a + * port ID set to VSS_IVOCPROC_PORT_ID_NONE. + */ + uint32_t rx_topology_id; + /* + * Rx leg topology ID. If not supplying a topology ID set to + * VSS_IVOCPROC_TOPOLOGY_ID_NONE. + */ + int32_t network_id; + /* + * Network ID. (Refer to VSS_NETWORK_ID_XXX). If not supplying a network + * ID set to VSS_NETWORK_ID_DEFAULT. + */ +} __attribute__((packed)); + +struct vss_ivocproc_cmd_set_device_t { + uint32_t tx_port_id; + /**< + * TX device port ID which vocproc will connect to. + * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port. + */ + uint32_t tx_topology_id; + /**< + * TX leg topology ID. + * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any + * pre/post-processing blocks and is pass-through. + */ + int32_t rx_port_id; + /**< + * RX device port ID which vocproc will connect to. + * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port. + */ + uint32_t rx_topology_id; + /**< + * RX leg topology ID. + * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any + * pre/post-processing blocks and is pass-through. + */ +} __attribute__((packed)); + +struct vss_ivocproc_cmd_set_volume_index_t { + uint16_t vol_index; + /**< + * Volume index utilized by the vocproc to index into the volume table + * provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set + * volume on the VDSP. + */ +} __attribute__((packed)); + +struct cvp_create_full_ctl_session_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_create_full_control_session_t cvp_session; +} __attribute__ ((packed)); + +struct cvp_command { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_device_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_set_device_t cvp_set_device; +} __attribute__ ((packed)); + +struct cvp_cache_calibration_data_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_cache_volume_calibration_table_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_vp3_data_cmd { + struct apr_hdr hdr; +} __attribute__((packed)); + +struct cvp_set_rx_volume_index_cmd { + struct apr_hdr hdr; + struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx; +} __attribute__((packed)); + +/* CB for up-link packets. */ +typedef void (*ul_cb_fn)(uint8_t *voc_pkt, + uint32_t pkt_len, + void *private_data); + +/* CB for down-link packets. 
*/ +typedef void (*dl_cb_fn)(uint8_t *voc_pkt, + uint32_t *pkt_len, + void *private_data); + + +struct mvs_driver_info { + uint32_t media_type; + uint32_t rate; + uint32_t network_type; + uint32_t dtx_mode; + ul_cb_fn ul_cb; + dl_cb_fn dl_cb; + void *private_data; +}; + +struct incall_rec_info { + uint32_t pending; + uint32_t rec_mode; +}; + +struct incall_music_info { + uint32_t pending; + uint32_t playing; +}; + +struct voice_data { + int voc_state;/*INIT, CHANGE, RELEASE, RUN */ + uint32_t voc_path; + uint32_t adsp_version; + + wait_queue_head_t mvm_wait; + wait_queue_head_t cvs_wait; + wait_queue_head_t cvp_wait; + + uint32_t device_events; + + /* cache the values related to Rx and Tx */ + struct device_data dev_rx; + struct device_data dev_tx; + + /* these default values are for all devices */ + uint32_t default_mute_val; + uint32_t default_vol_val; + uint32_t default_sample_val; + + /* call status */ + int v_call_status; /* Start or End */ + + /* APR to MVM in the modem */ + void *apr_mvm; + /* APR to CVS in the modem */ + void *apr_cvs; + /* APR to CVP in the modem */ + void *apr_cvp; + + /* APR to MVM in the Q6 */ + void *apr_q6_mvm; + /* APR to CVS in the Q6 */ + void *apr_q6_cvs; + /* APR to CVP in the Q6 */ + void *apr_q6_cvp; + + u32 mvm_state; + u32 cvs_state; + u32 cvp_state; + + /* Handle to MVM in the modem */ + u16 mvm_handle; + /* Handle to CVS in the modem */ + u16 cvs_handle; + /* Handle to CVP in the modem */ + u16 cvp_handle; + + /* Handle to MVM in the Q6 */ + u16 mvm_q6_handle; + /* Handle to CVS in the Q6 */ + u16 cvs_q6_handle; + /* Handle to CVP in the Q6 */ + u16 cvp_q6_handle; + + struct mutex lock; + + struct mvs_driver_info mvs_info; + + struct incall_rec_info rec_info; + + struct incall_music_info music_info; +}; + +int voice_set_voc_path_full(uint32_t set); + +void voice_register_mvs_cb(ul_cb_fn ul_cb, + dl_cb_fn dl_cb, + void *private_data); + +void voice_config_vocoder(uint32_t media_type, + uint32_t rate, + uint32_t network_type, + uint32_t dtx_mode); + +int voice_start_record(uint32_t rec_mode, uint32_t set); + +int voice_start_playback(uint32_t set); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h new file mode 100644 index 00000000000..e07ad02505c --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_ecodec.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6V2_SNDDEV_ECODEC_H +#define __MACH_QDSP6V2_SNDDEV_ECODEC_H +#include + +struct snddev_ecodec_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u8 channel_mode; + u32 conf_pcm_ctl_val; + u32 conf_aux_codec_intf; + u32 conf_data_format_padding_val; +}; + +struct q6v2audio_ecodec_ops { + void (*bt_sco_enable)(int en); +}; + +void htc_8x60_register_ecodec_ops(struct q6v2audio_ecodec_ops *ops); +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h new file mode 100644 index 00000000000..cdc81a1f03f --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_hdmi.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_HDMI_H +#define __MACH_QDSP6_V2_SNDDEV_HDMI_H + +struct snddev_hdmi_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u32 acdb_id; /* Audio Cal purpose */ + u8 channel_mode; + u32 default_sample_rate; +}; +#endif diff --git a/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h new file mode 100644 index 00000000000..06f31f569dd --- /dev/null +++ b/arch/arm/mach-msm/include/mach/qdsp6v3/snddev_icodec.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __MACH_QDSP6V2_SNDDEV_ICODEC_H +#define __MACH_QDSP6V2_SNDDEV_ICODEC_H +#include +#include +#include +#include + +struct snddev_icodec_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + /* Adie profile */ + struct adie_codec_dev_profile *profile; + /* Afe setting */ + u8 channel_mode; + u32 default_sample_rate; + void (*pamp_on) (int on); + void (*voltage_on) (int on); + u32 dev_vol_type; + u32 aic3254_id; + u32 aic3254_voc_id; + u32 default_aic3254_id; +}; + +/* Context for each internal codec sound device */ +struct snddev_icodec_state { + struct snddev_icodec_data *data; + struct adie_codec_path *adie_path; + u32 sample_rate; + u32 enabled; +}; + +struct q6v2audio_analog_ops { + void (*speaker_enable)(int en); + void (*headset_enable)(int en); + void (*handset_enable)(int en); + void (*bt_sco_enable)(int en); + void (*headset_speaker_enable)(int en); + void (*int_mic_enable)(int en); + void (*back_mic_enable)(int en); + void (*ext_mic_enable)(int en); + void (*stereo_mic_enable)(int en); + void (*usb_headset_enable)(int en); + void (*fm_headset_enable)(int en); + void (*fm_speaker_enable)(int en); + void (*voltage_on) (int on); +}; + +struct q6v2audio_icodec_ops { + int (*support_aic3254) (void); + int (*support_adie) (void); + int (*is_msm_i2s_slave) (void); + int (*support_aic3254_use_mclk) (void); +}; + +struct q6v2audio_aic3254_ops { + void (*aic3254_set_mode)(int config, int mode); +}; + +struct aic3254_info { + u32 dev_id; + u32 path_id; +}; + + +void htc_8x60_register_analog_ops(struct q6v2audio_analog_ops *ops); +void htc_8x60_register_aic3254_ops(struct q6v2audio_aic3254_ops *ops); +int update_aic3254_info(struct aic3254_info *info); +void htc_8x60_register_icodec_ops(struct q6v2audio_icodec_ops *ops); +#endif diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h index 790430d985a..03f6cc81511 100644 --- a/arch/arm/mach-msm/include/mach/scm.h +++ b/arch/arm/mach-msm/include/mach/scm.h @@ -21,6 +21,7 @@ #define 
SCM_SVC_SSD 0x7 #define SCM_SVC_FUSE 0x8 #define SCM_SVC_PWR 0x9 +#define SCM_SVC_CP 0xC #define SCM_SVC_TZSCHEDULER 0xFC #define SCM_SVC_OEM 0xFE diff --git a/arch/arm/mach-msm/include/mach/usb_bam.h b/arch/arm/mach-msm/include/mach/usb_bam.h new file mode 100644 index 00000000000..4caa71bb3de --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_bam.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _USB_BAM_H_ +#define _USB_BAM_H_ + +/** + * Connect USB-to-Peripheral SPS connection. + * + * On success the allocated pipe indexes are returned through src_pipe_idx and dst_pipe_idx. + * + * @idx - Connection index. + * + * @src_pipe_idx - allocated pipe index - USB as a + * source (output) + * + * @dst_pipe_idx - allocated pipe index - USB as a + * destination (output) + * + * @return 0 on success, negative value on error + * + */ +#ifdef CONFIG_USB_BAM +int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx); +#else +static inline int usb_bam_connect(u8 idx, u8 *src_pipe_idx, u8 *dst_pipe_idx) +{ + return -ENODEV; +} +#endif +#endif /* _USB_BAM_H_ */ diff --git a/arch/arm/mach-msm/include/mach/usb_bridge.h b/arch/arm/mach-msm/include/mach/usb_bridge.h new file mode 100644 index 00000000000..2b7e754b35c --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_bridge.h @@ -0,0 +1,122 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#ifndef __LINUX_USB_BRIDGE_H__ +#define __LINUX_USB_BRIDGE_H__ + +#include +#include + +/* bridge device 0: DUN + * bridge device 1 : Tethered RMNET + */ +#define MAX_BRIDGE_DEVICES 2 + +/*PID 9001*/ +#define DUN_IFACE_NUM 2 +#define TETHERED_RMNET_IFACE_NUM 3 + +struct bridge_ops { + int (*send_pkt)(void *, void *, size_t actual); + void (*send_cbits)(void *, unsigned int); + + /* flow control */ + void (*unthrottle_tx)(void *); +}; + +#define TX_THROTTLED BIT(0) +#define RX_THROTTLED BIT(1) + +struct bridge { + /* context of the gadget port using bridge driver */ + void *ctx; + + /* bridge device array index mapped to the gadget port array index. 
+ * data bridge[ch_id] <-- bridge --> gadget port[ch_id] + */ + unsigned int ch_id; + + /* flow control bits */ + unsigned long flags; + + /* data/ctrl bridge callbacks */ + struct bridge_ops ops; +}; + +#if defined(CONFIG_USB_QCOM_MDM_BRIDGE) || \ + defined(CONFIG_USB_QCOM_MDM_BRIDGE_MODULE) + +/* Bridge APIs called by gadget driver */ +int ctrl_bridge_open(struct bridge *); +void ctrl_bridge_close(unsigned int); +int ctrl_bridge_write(unsigned int, char *, size_t); +int ctrl_bridge_set_cbits(unsigned int, unsigned int); +unsigned int ctrl_bridge_get_cbits_tohost(unsigned int); +int data_bridge_open(struct bridge *brdg); +void data_bridge_close(unsigned int); +int data_bridge_write(unsigned int , struct sk_buff *); +int data_bridge_unthrottle_rx(unsigned int); + +/* defined in control bridge */ +int ctrl_bridge_probe(struct usb_interface *, struct usb_host_endpoint *, int); +void ctrl_bridge_disconnect(unsigned int); +int ctrl_bridge_resume(unsigned int); +int ctrl_bridge_suspend(unsigned int); + +#else + +static inline int __maybe_unused ctrl_bridge_open(struct bridge *brdg) +{ + return -ENODEV; +} + +static inline void __maybe_unused ctrl_bridge_close(unsigned int id) { } + +static inline int __maybe_unused ctrl_bridge_write(unsigned int id, + char *data, size_t size) +{ + return -ENODEV; +} + +static inline int __maybe_unused ctrl_bridge_set_cbits(unsigned int id, + unsigned int cbits) +{ + return -ENODEV; +} + +static inline unsigned int __maybe_unused +ctrl_bridge_get_cbits_tohost(unsigned int id) +{ + return -ENODEV; +} + +static inline int __maybe_unused data_bridge_open(struct bridge *brdg) +{ + return -ENODEV; +} + +static inline void __maybe_unused data_bridge_close(unsigned int id) { } + +static inline int __maybe_unused data_bridge_write(unsigned int id, + struct sk_buff *skb) +{ + return -ENODEV; +} + +static inline int __maybe_unused data_bridge_unthrottle_rx(unsigned int id) +{ + return -ENODEV; +} + +#endif +#endif diff --git a/arch/arm/mach-msm/include/mach/usb_gadget_xport.h b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h new file mode 100644 index 00000000000..d8a3e60f63d --- /dev/null +++ b/arch/arm/mach-msm/include/mach/usb_gadget_xport.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __LINUX_USB_GADGET_XPORT_H__ +#define __LINUX_USB_GADGET_XPORT_H__ + +enum transport_type { + USB_GADGET_XPORT_UNDEF, + USB_GADGET_XPORT_TTY, + USB_GADGET_XPORT_SDIO, + USB_GADGET_XPORT_SMD, + USB_GADGET_XPORT_BAM, + USB_GADGET_XPORT_BAM2BAM, + USB_GADGET_XPORT_HSIC, + USB_GADGET_XPORT_NONE, +}; + +#define XPORT_STR_LEN 10 + +static char *xport_to_str(enum transport_type t) +{ + switch (t) { + case USB_GADGET_XPORT_TTY: + return "TTY"; + case USB_GADGET_XPORT_SDIO: + return "SDIO"; + case USB_GADGET_XPORT_SMD: + return "SMD"; + case USB_GADGET_XPORT_BAM: + return "BAM"; + case USB_GADGET_XPORT_BAM2BAM: + return "BAM2BAM"; + case USB_GADGET_XPORT_HSIC: + return "HSIC"; + case USB_GADGET_XPORT_NONE: + return "NONE"; + default: + return "UNDEFINED"; + } +} + +static enum transport_type str_to_xport(const char *name) +{ + if (!strncasecmp("TTY", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_TTY; + if (!strncasecmp("SDIO", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_SDIO; + if (!strncasecmp("SMD", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_SMD; + if (!strncasecmp("BAM", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_BAM; + if (!strncasecmp("BAM2BAM", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_BAM2BAM; + if (!strncasecmp("HSIC", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_HSIC; + if (!strncasecmp("", name, XPORT_STR_LEN)) + return USB_GADGET_XPORT_NONE; + + return USB_GADGET_XPORT_UNDEF; +} + +enum gadget_type { + USB_GADGET_SERIAL, + USB_GADGET_RMNET, +}; + +#define NUM_RMNET_HSIC_PORTS 1 +#define NUM_DUN_HSIC_PORTS 1 +#define NUM_PORTS (NUM_RMNET_HSIC_PORTS \ + + NUM_DUN_HSIC_PORTS) + +int ghsic_ctrl_connect(void *, int); +void ghsic_ctrl_disconnect(void *, int); +int ghsic_ctrl_setup(unsigned int, enum gadget_type); +int ghsic_data_connect(void *, int); +void ghsic_data_disconnect(void *, int); +int ghsic_data_setup(unsigned int, enum gadget_type); + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/Makefile b/arch/arm/mach-msm/qdsp6v3/Makefile new file mode 100644 index 00000000000..a0aca30cf3c --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/Makefile @@ -0,0 +1,15 @@ +obj-$(CONFIG_MSM8X60_RTAC) += rtac.o +obj-y += audio_dev_ctl.o +obj-y += board-msm8x60-audio.o +obj-$(CONFIG_TIMPANI_CODEC) += snddev_icodec.o +obj-y += snddev_ecodec.o snddev_mi2s.o snddev_virtual.o +obj-y += apr.o apr_tal.o q6core.o dsp_debug.o +obj-y += audio_acdb.o +obj-y += q6asm.o q6adm.o q6afe.o +obj-y += pcm_out.o pcm_in.o fm.o +obj-y += audio_lpa.o +obj-y += q6voice.o +obj-y += snddev_hdmi.o +obj-y += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o audio_utils.o +obj-y += audio_mvs.o +obj-y += audio_wma.o audio_wmapro.o audio_aac.o diff --git a/arch/arm/mach-msm/qdsp6v3/aac_in.c b/arch/arm/mach-msm/qdsp6v3/aac_in.c new file mode 100644 index 00000000000..f01a1414ba2 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/aac_in.c @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 5 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((1536+sizeof(struct meta_out_dsp)) * 5)) + +#define AAC_FORMAT_ADTS 65535 + +void q6asm_aac_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} +/* ------------------- device --------------------- */ +static long aac_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + uint32_t aac_mode = AAC_ENC_MODE_AAC_LC; + + enc_cfg = audio->enc_cfg; + aac_config = audio->codec_cfg; + /* ENCODE CFG (after new set of API's are published )bharath*/ + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + pr_debug("%s:sbr_ps_flag = %d, sbr_flag = %d\n", __func__, + aac_config->sbr_ps_on_flag, aac_config->sbr_on_flag); + if (aac_config->sbr_ps_on_flag) + aac_mode = AAC_ENC_MODE_EAAC_P; + else if (aac_config->sbr_on_flag) + aac_mode = AAC_ENC_MODE_AAC_P; + else + aac_mode = AAC_ENC_MODE_AAC_LC; + + rc = q6asm_enc_cfg_blk_aac(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->sample_rate, + enc_cfg->channels, + enc_cfg->bit_rate, + aac_mode, + enc_cfg->stream_format); + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + 
pr_aud_err("%s:session id %d: Audio Start procedure\ + failed rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: Rxed AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (enc_cfg->channels == CH_MODE_MONO) + cfg.channels = 1; + else + cfg.channels = 2; + cfg.sample_rate = enc_cfg->sample_rate; + cfg.bit_rate = enc_cfg->bit_rate; + /* ADTS(-1) to ADTS(0x00), RAW(0x00) to RAW(0x03) */ + cfg.stream_format = ((enc_cfg->stream_format == \ + 0x00) ? AUDIO_AAC_FORMAT_ADTS : AUDIO_AAC_FORMAT_RAW); + pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d\ + bitrate=%d\n", __func__, audio->ac->session, + cfg.stream_format, cfg.sample_rate, cfg.bit_rate); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + pr_debug("%s:session id %d: Set-aac-cfg: stream=%d\n", __func__, + audio->ac->session, cfg.stream_format); + + if ((cfg.stream_format != AUDIO_AAC_FORMAT_RAW) && + (cfg.stream_format != AAC_FORMAT_ADTS)) { + pr_aud_err("%s:session id %d: unsupported AAC format\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + + if (cfg.channels == 1) { + cfg.channels = CH_MODE_MONO; + } else if (cfg.channels == 2) { + cfg.channels = CH_MODE_STEREO; + } else { + rc = -EINVAL; + break; + } + if ((cfg.sample_rate < 8000) || (cfg.sample_rate > 48000)) { + pr_aud_err("%s: ERROR in setting samplerate = %d\n", + __func__, cfg.sample_rate); + rc = -EINVAL; + break; + } + enc_cfg->sample_rate = cfg.sample_rate; + enc_cfg->channels = cfg.channels; + enc_cfg->bit_rate = cfg.bit_rate; + enc_cfg->stream_format = + ((cfg.stream_format == AUDIO_AAC_FORMAT_RAW) ? 
\ + 0x03 : 0x00); + pr_debug("%s:session id %d: Set-aac-cfg:SR= 0x%x ch=0x%x\ + bitrate=0x%x, format(adts/raw) = %d\n", + __func__, audio->ac->session, enc_cfg->sample_rate, + enc_cfg->channels, enc_cfg->bit_rate, + enc_cfg->stream_format); + break; + } + case AUDIO_GET_AAC_CONFIG: { + if (copy_to_user((void *)arg, &audio->codec_cfg, + sizeof(struct msm_audio_aac_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config aac_cfg; + struct msm_audio_aac_config *audio_aac_cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + audio_aac_cfg = audio->codec_cfg; + + if (copy_from_user(&aac_cfg, (void *)arg, + sizeof(struct msm_audio_aac_config))) { + rc = -EFAULT; + break; + } + pr_debug("%s:session id %d: AUDIO_SET_AAC_CONFIG: sbr_flag = %d" + " sbr_ps_flag = %d\n", __func__, + audio->ac->session, aac_cfg.sbr_on_flag, + aac_cfg.sbr_ps_on_flag); + audio_aac_cfg->sbr_on_flag = aac_cfg.sbr_on_flag; + audio_aac_cfg->sbr_ps_on_flag = aac_cfg.sbr_ps_on_flag; + if ((audio_aac_cfg->sbr_on_flag == 1) || + (audio_aac_cfg->sbr_ps_on_flag == 1)) { + if (enc_cfg->sample_rate < 24000) { + pr_aud_err("%s: ERROR in setting samplerate = %d" + "\n", __func__, enc_cfg->sample_rate); + rc = -EINVAL; + break; + } + } + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int aac_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:Could not allocate memory for aac\ + driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_aac_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s: Could not allocate memory for aac\ + config param\n", __func__); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_aud_err("%s: Could not allocate memory for aac\ + config\n", __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + aac_config = audio->codec_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 1536; + audio->max_frames_per_buf = 5; + enc_cfg->sample_rate = 8000; + enc_cfg->channels = 1; + enc_cfg->bit_rate = 16000; + enc_cfg->stream_format = 0x00;/* 0:ADTS, 3:RAW */ + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + aac_config->format = AUDIO_AAC_FORMAT_ADTS; + aac_config->audio_object = AUDIO_AAC_OBJECT_LC; + aac_config->sbr_on_flag = 0; + aac_config->sbr_ps_on_flag = 0; + aac_config->channel_configuration = 1; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_aac_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory 
for\ + audio client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + /* open aac encoder in tunnel mode */ + audio->buf_cfg.frames_per_buf = 0x01; + + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_MPEG4_AAC, + FORMAT_LINEAR_PCM); + + if (rc < 0) { + pr_aud_err("%s:session id %d: NT Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x01; + pr_aud_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_MPEG4_AAC); + + if (rc < 0) { + pr_aud_err("%s:session id %d: Tunnel Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x00; + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = aac_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = aac_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_aac_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_aac_in", + .fops = &audio_in_fops, +}; + +static int __init aac_in_init(void) +{ + return misc_register(&audio_aac_in_misc); +} +device_initcall(aac_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/amrnb_in.c b/arch/arm/mach-msm/qdsp6v3/amrnb_in.c new file mode 100644 index 00000000000..35984c38ef5 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/amrnb_in.c @@ -0,0 +1,330 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((32+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_amrnb_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ +static long amrnb_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + rc = q6asm_enc_cfg_blk_amrnb(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->band_mode, + enc_cfg->dtx_enable); + + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd amrnb media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, + audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:AUDIO_STOP\n", __func__); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { + if (copy_to_user((void *)arg, 
audio->enc_cfg, + sizeof(struct msm_audio_amrnb_enc_config_v2))) + rc = -EFAULT; + break; + } + case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { + struct msm_audio_amrnb_enc_config_v2 cfg; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_amrnb_enc_config_v2))) { + rc = -EFAULT; + break; + } + if (cfg.band_mode > 8 || + cfg.band_mode < 1) { + pr_aud_err("%s:session id %d: invalid band mode\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + /* AMR NB encoder accepts values between 0-7 + while openmax provides values between 1-8 + as per spec */ + enc_cfg->band_mode = (cfg.band_mode - 1); + enc_cfg->dtx_enable = (cfg.dtx_enable ? 1 : 0); + enc_cfg->frame_format = 0; + pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n", + __func__, audio->ac->session, + enc_cfg->band_mode, enc_cfg->dtx_enable); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int amrnb_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s: Could not allocate memory for amrnb\ + driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrnb_enc_config_v2), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s: Could not allocate memory for amrnb\ + config param\n", __func__); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 32; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->band_mode = 7; + enc_cfg->dtx_enable = 0; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_amrnb_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory for audio\ + client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open amrnb encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_AMRNB, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_AMRNB); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + 
/* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, audio->ac->session, + rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", + __func__, audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = amrnb_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = amrnb_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_amrnb_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrnb_in", + .fops = &audio_in_fops, +}; + +static int __init amrnb_in_init(void) +{ + return misc_register(&audio_amrnb_in_misc); +} + +device_initcall(amrnb_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr.c b/arch/arm/mach-msm/qdsp6v3/apr.c new file mode 100644 index 00000000000..8ee12c576ab --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr.c @@ -0,0 +1,681 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "apr_tal.h" +#include "dsp_debug.h" + +struct apr_q6 q6; +struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX]; +static atomic_t dsp_state; +static atomic_t modem_state; + +static wait_queue_head_t dsp_wait; +static wait_queue_head_t modem_wait; +/* Subsystem restart: QDSP6 data, functions */ +static struct workqueue_struct *apr_reset_workqueue; +static void apr_reset_deregister(struct work_struct *work); +struct apr_reset_work { + void *handle; + struct work_struct work; +}; + + +int apr_send_pkt(void *handle, uint32_t *buf) +{ + struct apr_svc *svc = handle; + struct apr_client *clnt; + struct apr_hdr *hdr; + uint16_t dest_id; + uint16_t client_id; + uint16_t w_len; + unsigned long flags; + + if (!handle || !buf) { + pr_aud_err("APR: Wrong parameters\n"); + return -EINVAL; + } + if (svc->need_reset) { + pr_aud_err("apr: send_pkt service need reset\n"); + return -ENETRESET; + } + + if ((svc->dest_id == APR_DEST_QDSP6) && + (atomic_read(&dsp_state) == 0)) { + pr_aud_err("apr: Still dsp is not Up\n"); + return -ENETRESET; + } else if ((svc->dest_id == APR_DEST_MODEM) && + (atomic_read(&modem_state) == 0)) { + pr_aud_err("apr: Still Modem is not Up\n"); + return -ENETRESET; + } + + + spin_lock_irqsave(&svc->w_lock, flags); + dest_id = svc->dest_id; + client_id = svc->client_id; + clnt = &client[dest_id][client_id]; + + if (!client[dest_id][client_id].handle) { + pr_aud_err("APR: Still service is not yet opened\n"); + spin_unlock_irqrestore(&svc->w_lock, flags); + return -EINVAL; + } + hdr = (struct apr_hdr *)buf; + + hdr->src_domain = APR_DOMAIN_APPS; + hdr->src_svc = svc->id; + if (dest_id == APR_DEST_MODEM) + hdr->dest_domain = APR_DOMAIN_MODEM; + else if (dest_id == APR_DEST_QDSP6) + hdr->dest_domain = APR_DOMAIN_ADSP; + + hdr->dest_svc = svc->id; + + w_len = apr_tal_write(clnt->handle, buf, hdr->pkt_size); + if (w_len != hdr->pkt_size) + pr_aud_err("Unable to write APR pkt successfully: %d\n", w_len); + spin_unlock_irqrestore(&svc->w_lock, flags); + + return w_len; +} + +static void apr_cb_func(void *buf, int len, void *priv) +{ + struct apr_client_data data; + struct apr_client *apr_client; + struct apr_svc *c_svc; + struct apr_hdr *hdr; + uint16_t hdr_size; + uint16_t msg_type; + uint16_t ver; + uint16_t src; + uint16_t svc; + uint16_t clnt; + int i; + int temp_port = 0; + uint32_t *ptr; + + pr_debug("APR2: len = %d\n", len); + ptr = buf; + pr_debug("\n*****************\n"); + for (i = 0; i < len/4; i++) + pr_debug("%x ", ptr[i]); + pr_debug("\n"); + pr_debug("\n*****************\n"); + + if (!buf || len <= APR_HDR_SIZE) { + pr_aud_err("APR: Improper apr pkt received:%p %d\n", + buf, len); + return; + } + hdr = buf; + + ver = hdr->hdr_field; + ver = (ver & 0x000F); + if (ver > APR_PKT_VER + 1) { + pr_aud_err("APR: Wrong version: %d\n", ver); + return; + } + + hdr_size = hdr->hdr_field; + hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4; + if (hdr_size < APR_HDR_SIZE) { + pr_aud_err("APR: Wrong hdr size:%d\n", hdr_size); + return; + } + + if (hdr->pkt_size < APR_HDR_SIZE) { + pr_aud_err("APR: Wrong paket size\n"); + return; + } + msg_type = hdr->hdr_field; + msg_type = (msg_type >> 0x08) & 0x0003; + if (msg_type >= APR_MSG_TYPE_MAX && + msg_type != APR_BASIC_RSP_RESULT) { + pr_aud_err("APR: Wrong message type: %d\n", msg_type); + return; + } + + if 
(hdr->src_domain >= APR_DOMAIN_MAX || + hdr->dest_domain >= APR_DOMAIN_MAX || + hdr->src_svc >= APR_SVC_MAX || + hdr->dest_svc >= APR_SVC_MAX) { + pr_aud_err("APR: Wrong APR header\n"); + return; + } + + svc = hdr->dest_svc; + if (hdr->src_domain == APR_DOMAIN_MODEM) { + src = APR_DEST_MODEM; + if (svc == APR_SVC_MVS || svc == APR_SVC_MVM || + svc == APR_SVC_CVS || svc == APR_SVC_CVP || + svc == APR_SVC_TEST_CLIENT) + clnt = APR_CLIENT_VOICE; + else { + pr_aud_err("APR: Wrong svc :%d\n", svc); + return; + } + } else if (hdr->src_domain == APR_DOMAIN_ADSP) { + src = APR_DEST_QDSP6; + if (svc == APR_SVC_AFE || svc == APR_SVC_ASM || + svc == APR_SVC_VSM || svc == APR_SVC_VPM || + svc == APR_SVC_ADM || svc == APR_SVC_ADSP_CORE || + svc == APR_SVC_TEST_CLIENT || svc == APR_SVC_ADSP_MVM || + svc == APR_SVC_ADSP_CVS || svc == APR_SVC_ADSP_CVP) + clnt = APR_CLIENT_AUDIO; + else { + pr_aud_err("APR: Wrong svc :%d\n", svc); + return; + } + } else { + pr_aud_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); + return; + } + + pr_debug("src =%d clnt = %d\n", src, clnt); + apr_client = &client[src][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) + if (apr_client->svc[i].id == svc) { + pr_debug("%d\n", apr_client->svc[i].id); + c_svc = &apr_client->svc[i]; + break; + } + + if (i == APR_SVC_MAX) { + pr_aud_err("APR: service is not registered\n"); + return; + } + pr_debug("svc_idx = %d\n", i); + pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id, + c_svc->client_id, c_svc->fn, c_svc->priv); + data.payload_size = hdr->pkt_size - hdr_size; + data.opcode = hdr->opcode; + data.src = src; + data.src_port = hdr->src_port; + data.dest_port = hdr->dest_port; + data.token = hdr->token; + data.msg_type = msg_type; + if (data.payload_size > 0) + data.payload = (char *)hdr + hdr_size; + + temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF); + pr_debug("port = %d t_port = %d\n", data.src_port, temp_port); + if (c_svc->port_cnt && c_svc->port_fn[temp_port]) + c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]); + else if (c_svc->fn) + c_svc->fn(&data, c_svc->priv); + else + pr_aud_err("APR: Rxed a packet for NULL callback\n"); +} + +struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, + uint32_t src_port, void *priv) +{ + int client_id = 0; + int svc_idx = 0; + int svc_id = 0; + int dest_id = 0; + int temp_port = 0; + struct apr_svc *svc = NULL; + int rc = 0; + + if (!dest || !svc_name || !svc_fn) + return NULL; + + if (!strcmp(dest, "ADSP")) + dest_id = APR_DEST_QDSP6; + else if (!strcmp(dest, "MODEM")) { + dest_id = APR_DEST_MODEM; + } else { + pr_aud_err("APR: wrong destination\n"); + goto done; + } + + if ((dest_id == APR_DEST_QDSP6) && + (atomic_read(&dsp_state) == 0)) { + rc = wait_event_timeout(dsp_wait, + (atomic_read(&dsp_state) == 1), 5*HZ); + if (rc == 0) { + pr_aud_err("apr: Still dsp is not Up\n"); + return NULL; + } + } else if ((dest_id == APR_DEST_MODEM) && + (atomic_read(&modem_state) == 0)) { + rc = wait_event_timeout(modem_wait, + (atomic_read(&modem_state) == 1), 5*HZ); + if (rc == 0) { + pr_aud_err("apr: Still Modem is not Up\n"); + return NULL; + } + } + + if (!strcmp(svc_name, "AFE")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 0; + svc_id = APR_SVC_AFE; + } else if (!strcmp(svc_name, "ASM")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 1; + svc_id = APR_SVC_ASM; + } else if (!strcmp(svc_name, "ADM")) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 2; + svc_id = APR_SVC_ADM; + } else if (!strcmp(svc_name, "CORE")) { + client_id = APR_CLIENT_AUDIO; + 
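+		/*
+		 * As in the other name branches, the service string is
+		 * mapped to a (client type, svc[] slot, APR service id)
+		 * tuple; the slot index chosen here selects the entry of
+		 * client[dest][client].svc[] that will hold this
+		 * registration further below.
+		 */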
svc_idx = 3; + svc_id = APR_SVC_ADSP_CORE; + } else if (!strcmp(svc_name, "TEST")) { + if (dest_id == APR_DEST_QDSP6) { + client_id = APR_CLIENT_AUDIO; + svc_idx = 4; + } else { + client_id = APR_CLIENT_VOICE; + svc_idx = 7; + } + svc_id = APR_SVC_TEST_CLIENT; + } else if (!strcmp(svc_name, "VSM")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 0; + svc_id = APR_SVC_VSM; + } else if (!strcmp(svc_name, "VPM")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 1; + svc_id = APR_SVC_VPM; + } else if (!strcmp(svc_name, "MVS")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 2; + svc_id = APR_SVC_MVS; + } else if (!strcmp(svc_name, "MVM")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 3; + svc_id = APR_SVC_MVM; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 5; + svc_id = APR_SVC_ADSP_MVM; + } + } else if (!strcmp(svc_name, "CVS")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 4; + svc_id = APR_SVC_CVS; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 6; + svc_id = APR_SVC_ADSP_CVS; + } + } else if (!strcmp(svc_name, "CVP")) { + if (dest_id == APR_DEST_MODEM) { + client_id = APR_CLIENT_VOICE; + svc_idx = 5; + svc_id = APR_SVC_CVP; + } else { + client_id = APR_CLIENT_AUDIO; + svc_idx = 7; + svc_id = APR_SVC_ADSP_CVP; + } + } else if (!strcmp(svc_name, "SRD")) { + client_id = APR_CLIENT_VOICE; + svc_idx = 6; + svc_id = APR_SVC_SRD; + } else { + pr_aud_err("APR: Wrong svc name\n"); + goto done; + } + + pr_debug("svc name = %s c_id = %d dest_id = %d\n", + svc_name, client_id, dest_id); + mutex_lock(&q6.lock); + if (q6.state == APR_Q6_NOIMG) { + q6.pil = pil_get("q6"); + if (!q6.pil) { + pr_aud_err("APR: Unable to load q6 image\n"); + mutex_unlock(&q6.lock); + return svc; + } + q6.state = APR_Q6_LOADED; + } + mutex_unlock(&q6.lock); + mutex_lock(&client[dest_id][client_id].m_lock); + if (!client[dest_id][client_id].handle) { + client[dest_id][client_id].handle = apr_tal_open(client_id, + dest_id, APR_DL_SMD, apr_cb_func, NULL); + if (!client[dest_id][client_id].handle) { + svc = NULL; + pr_aud_err("APR: Unable to open handle\n"); + mutex_unlock(&client[dest_id][client_id].m_lock); + goto done; + } + } + mutex_unlock(&client[dest_id][client_id].m_lock); + svc = &client[dest_id][client_id].svc[svc_idx]; + mutex_lock(&svc->m_lock); + client[dest_id][client_id].id = client_id; + if (svc->need_reset) { + mutex_unlock(&svc->m_lock); + pr_aud_err("APR: Service needs reset\n"); + goto done; + } + svc->priv = priv; + svc->id = svc_id; + svc->dest_id = dest_id; + svc->client_id = client_id; + if (src_port != 0xFFFFFFFF) { + temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF); + if (temp_port >= APR_MAX_PORTS) { + mutex_unlock(&svc->m_lock); + pr_aud_err("APR: illegal port ID %d\n", temp_port); + svc = NULL; + goto done; + } + pr_debug("port = %d t_port = %d\n", src_port, temp_port); + if (!svc->port_cnt && !svc->svc_cnt) + client[dest_id][client_id].svc_cnt++; + svc->port_cnt++; + svc->port_fn[temp_port] = svc_fn; + svc->port_priv[temp_port] = priv; + } else { + if (!svc->fn) { + if (!svc->port_cnt && !svc->svc_cnt) + client[dest_id][client_id].svc_cnt++; + svc->fn = svc_fn; + if (svc->port_cnt) + svc->svc_cnt++; + } + } + + mutex_unlock(&svc->m_lock); +done: + return svc; +} + +static void apr_reset_deregister(struct work_struct *work) +{ + struct apr_svc *handle = NULL; + struct apr_reset_work *apr_reset = + container_of(work, struct apr_reset_work, work); + + handle = apr_reset->handle; + pr_debug("%s:handle[%p]\n", __func__, 
handle); + apr_deregister(handle); + kfree(apr_reset); + msleep(5); +} + +int apr_deregister(void *handle) +{ + struct apr_svc *svc = handle; + struct apr_client *clnt; + uint16_t dest_id; + uint16_t client_id; + + if (!handle) + return -EINVAL; + + mutex_lock(&svc->m_lock); + dest_id = svc->dest_id; + client_id = svc->client_id; + clnt = &client[dest_id][client_id]; + + if (svc->port_cnt > 0 || svc->svc_cnt > 0) { + if (svc->port_cnt) + svc->port_cnt--; + else if (svc->svc_cnt) + svc->svc_cnt--; + if (!svc->port_cnt && !svc->svc_cnt) { + client[dest_id][client_id].svc_cnt--; + svc->need_reset = 0x0; + } + } else if (client[dest_id][client_id].svc_cnt > 0) { + client[dest_id][client_id].svc_cnt--; + if (!client[dest_id][client_id].svc_cnt) { + svc->need_reset = 0x0; + pr_debug("%s: service is reset %p\n", __func__, svc); + } + } + + if (!svc->port_cnt && !svc->svc_cnt) { + svc->priv = NULL; + svc->id = 0; + svc->fn = NULL; + svc->dest_id = 0; + svc->client_id = 0; + svc->need_reset = 0x0; + } + if (client[dest_id][client_id].handle && + !client[dest_id][client_id].svc_cnt) { + apr_tal_close(client[dest_id][client_id].handle); + client[dest_id][client_id].handle = NULL; + } + mutex_unlock(&svc->m_lock); + + return 0; +} + +void apr_reset(void *handle) +{ + struct apr_reset_work *apr_reset_worker = NULL; + + if (!handle) + return; + pr_debug("%s: handle[%p]\n", __func__, handle); + + apr_reset_worker = kzalloc(sizeof(struct apr_reset_work), + GFP_ATOMIC); + if (apr_reset_worker == NULL || apr_reset_workqueue == NULL) { + pr_aud_err("%s: mem failure\n", __func__); + return; + } + apr_reset_worker->handle = handle; + INIT_WORK(&apr_reset_worker->work, apr_reset_deregister); + queue_work(apr_reset_workqueue, &apr_reset_worker->work); +} + +void change_q6_state(int state) +{ + mutex_lock(&q6.lock); + q6.state = state; + mutex_unlock(&q6.lock); +} + +int adsp_state(int state) +{ + pr_aud_info("dsp state = %d\n", state); + return 0; +} + +/* Dispatch the Reset events to Modem and audio clients */ +void dispatch_event(unsigned long code, unsigned short proc) +{ + struct apr_client *apr_client; + struct apr_client_data data; + struct apr_svc *svc; + uint16_t clnt; + int i, j; + + data.opcode = RESET_EVENTS; + data.reset_event = code; + data.reset_proc = proc; + + clnt = APR_CLIENT_AUDIO; + apr_client = &client[proc][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) { + mutex_lock(&apr_client->svc[i].m_lock); + if (apr_client->svc[i].fn) { + apr_client->svc[i].need_reset = 0x1; + apr_client->svc[i].fn(&data, apr_client->svc[i].priv); + } + if (apr_client->svc[i].port_cnt) { + svc = &(apr_client->svc[i]); + svc->need_reset = 0x1; + for (j = 0; j < APR_MAX_PORTS; j++) + if (svc->port_fn[j]) + svc->port_fn[j](&data, + svc->port_priv[j]); + } + mutex_unlock(&apr_client->svc[i].m_lock); + } + + clnt = APR_CLIENT_VOICE; + apr_client = &client[proc][clnt]; + for (i = 0; i < APR_SVC_MAX; i++) { + mutex_lock(&apr_client->svc[i].m_lock); + if (apr_client->svc[i].fn) { + apr_client->svc[i].need_reset = 0x1; + apr_client->svc[i].fn(&data, apr_client->svc[i].priv); + } + if (apr_client->svc[i].port_cnt) { + svc = &(apr_client->svc[i]); + svc->need_reset = 0x1; + for (j = 0; j < APR_MAX_PORTS; j++) + if (svc->port_fn[j]) + svc->port_fn[j](&data, + svc->port_priv[j]); + } + mutex_unlock(&apr_client->svc[i].m_lock); + } +} + +static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + pr_debug("M-Notify: Shutdown started\n"); + 
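+		/*
+		 * Mark the modem link down first so apr_send_pkt() starts
+		 * rejecting packets with -ENETRESET and new registrations
+		 * wait for the modem to come back, then broadcast
+		 * RESET_EVENTS to every voice and audio client registered
+		 * against the modem destination.
+		 */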
atomic_set(&modem_state, 0); + dispatch_event(code, APR_DEST_MODEM); + break; + case SUBSYS_AFTER_SHUTDOWN: + pr_debug("M-Notify: Shutdown Completed\n"); + break; + case SUBSYS_BEFORE_POWERUP: + pr_debug("M-notify: Bootup started\n"); + break; + case SUBSYS_AFTER_POWERUP: + if (atomic_read(&modem_state) == 0) { + atomic_set(&modem_state, 1); + wake_up(&modem_wait); + } + pr_debug("M-Notify: Bootup Completed\n"); + break; + default: + pr_aud_err("M-Notify: General: %lu\n", code); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block mnb = { + .notifier_call = modem_notifier_cb, +}; + +static int lpass_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + switch (code) { + case SUBSYS_BEFORE_SHUTDOWN: + pr_debug("L-Notify: Shutdown started\n"); + atomic_set(&dsp_state, 0); + dispatch_event(code, APR_DEST_QDSP6); + break; + case SUBSYS_AFTER_SHUTDOWN: + pr_debug("L-Notify: Shutdown Completed\n"); + break; + case SUBSYS_BEFORE_POWERUP: + pr_debug("L-notify: Bootup started\n"); + break; + case SUBSYS_AFTER_POWERUP: + if (atomic_read(&dsp_state) == 0) { + atomic_set(&dsp_state, 1); + wake_up(&dsp_wait); + } + pr_debug("L-Notify: Bootup Completed\n"); + break; + default: + pr_aud_err("L-Notify: Generel: %lu\n", code); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block lnb = { + .notifier_call = lpass_notifier_cb, +}; + + +static int __init apr_init(void) +{ + int i, j, k; + + pr_aud_info("apr_probe\n"); + for (i = 0; i < APR_DEST_MAX; i++) + for (j = 0; j < APR_CLIENT_MAX; j++) { + mutex_init(&client[i][j].m_lock); + for (k = 0; k < APR_SVC_MAX; k++) { + mutex_init(&client[i][j].svc[k].m_lock); + spin_lock_init(&client[i][j].svc[k].w_lock); + } + } + mutex_init(&q6.lock); + dsp_debug_register(adsp_state); + apr_reset_workqueue = + create_singlethread_workqueue("apr_driver"); + if (!apr_reset_workqueue) + return -ENOMEM; + return 0; +} +device_initcall(apr_init); + +static int __init apr_late_init(void) +{ + void *ret; + init_waitqueue_head(&dsp_wait); + init_waitqueue_head(&modem_wait); + atomic_set(&dsp_state, 1); + atomic_set(&modem_state, 1); + ret = subsys_notif_register_notifier("modem", &mnb); + pr_debug("subsys_register_notifier: ret1 = %p\n", ret); + ret = subsys_notif_register_notifier("lpass", &lnb); + pr_debug("subsys_register_notifier: ret2 = %p\n", ret); + + return 0; +} +late_initcall(apr_late_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr_tal.c b/arch/arm/mach-msm/qdsp6v3/apr_tal.c new file mode 100644 index 00000000000..44047bd8756 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr_tal.c @@ -0,0 +1,288 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apr_tal.h" +#include "../clock-8x60.h" + +static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = { + { + "apr_audio_svc", + "apr_voice_svc", + }, + { + "apr_audio_svc", + "apr_voice_svc", + }, +}; + +struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX]; + +int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len) +{ + int w_len; + unsigned long flags; + + + spin_lock_irqsave(&apr_ch->w_lock, flags); + if (smd_write_avail(apr_ch->ch) < len) { + spin_unlock_irqrestore(&apr_ch->w_lock, flags); + return -EAGAIN; + } + + w_len = smd_write(apr_ch->ch, data, len); + spin_unlock_irqrestore(&apr_ch->w_lock, flags); + pr_debug("apr_tal:w_len = %d\n", w_len); + + if (w_len != len) { + pr_aud_err("apr_tal: Error in write\n"); + return -ENETRESET; + } + return w_len; +} + +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len) +{ + int rc = 0, retries = 0; + + if (!apr_ch->ch) + return -EINVAL; + + do { + if (rc == -EAGAIN) + udelay(50); + + rc = __apr_tal_write(apr_ch, data, len); + } while (rc == -EAGAIN && retries++ < 300); + + if (rc == -EAGAIN) + pr_aud_err("apr_tal: TIMEOUT for write\n"); + + return rc; +} + +static void apr_tal_notify(void *priv, unsigned event) +{ + struct apr_svc_ch_dev *apr_ch = priv; + int len, r_len, sz; + int pkt_cnt = 0; + unsigned long flags; + + pr_debug("event = %d\n", event); + switch (event) { + case SMD_EVENT_DATA: + pkt_cnt = 0; + spin_lock_irqsave(&apr_ch->lock, flags); +check_pending: + len = smd_read_avail(apr_ch->ch); + if (len < 0) { + pr_aud_err("apr_tal: Invalid Read Event :%d\n", len); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + sz = smd_cur_packet_size(apr_ch->ch); + if (sz < 0) { + pr_debug("pkt size is zero\n"); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + if (!len && !sz && !pkt_cnt) + goto check_write_avail; + if (!len) { + pr_debug("len = %d pkt_cnt = %d\n", len, pkt_cnt); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + r_len = smd_read_from_cb(apr_ch->ch, apr_ch->data, len); + if (len != r_len) { + pr_aud_err("apr_tal: Invalid Read\n"); + spin_unlock_irqrestore(&apr_ch->lock, flags); + return; + } + pkt_cnt++; + pr_debug("%d %d %d\n", len, sz, pkt_cnt); + if (apr_ch->func) + apr_ch->func(apr_ch->data, r_len, apr_ch->priv); + goto check_pending; +check_write_avail: + if (smd_write_avail(apr_ch->ch)) + wake_up(&apr_ch->wait); + spin_unlock_irqrestore(&apr_ch->lock, flags); + break; + case SMD_EVENT_OPEN: + pr_aud_info("apr_tal: SMD_EVENT_OPEN\n"); + apr_ch->smd_state = 1; + wake_up(&apr_ch->wait); + break; + case SMD_EVENT_CLOSE: + pr_aud_info("apr_tal: SMD_EVENT_CLOSE\n"); + break; + } +} + +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv) +{ + int rc; + + if ((svc >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) || + (dl >= APR_DL_MAX)) { + pr_aud_err("apr_tal: Invalid params\n"); + return NULL; + } + + if (apr_svc_ch[dl][dest][svc].ch) { + pr_aud_err("apr_tal: This channel alreday openend\n"); + return NULL; + } + + mutex_lock(&apr_svc_ch[dl][dest][svc].m_lock); + if (!apr_svc_ch[dl][dest][svc].dest_state) { + rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].dest, + apr_svc_ch[dl][dest][svc].dest_state, + msecs_to_jiffies(APR_OPEN_TIMEOUT_MS)); + if (rc == 0) { + pr_aud_err("%s: TIMEOUT for dest %d svc %d\n", __func__, 
dest, svc); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + BUG(); + } + pr_debug("apr_tal:Wakeup done\n"); + apr_svc_ch[dl][dest][svc].dest_state = 0; + } + rc = smd_named_open_on_edge(svc_names[dest][svc], dest, + &apr_svc_ch[dl][dest][svc].ch, + &apr_svc_ch[dl][dest][svc], + apr_tal_notify); + if (rc < 0) { + pr_aud_err("apr_tal: smd_open failed %s\n", + svc_names[dest][svc]); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + return NULL; + } + rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].wait, + (apr_svc_ch[dl][dest][svc].smd_state == 1), 5 * HZ); + if (rc == 0) { + pr_aud_err("apr_tal:TIMEOUT for OPEN event\n"); + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + BUG(); + return NULL; + } + if (!apr_svc_ch[dl][dest][svc].dest_state) { + apr_svc_ch[dl][dest][svc].dest_state = 1; + pr_debug("apr_tal:Waiting for apr svc init\n"); + msleep(200); + pr_debug("apr_tal:apr svc init done\n"); + } + apr_svc_ch[dl][dest][svc].smd_state = 0; + + apr_svc_ch[dl][dest][svc].func = func; + apr_svc_ch[dl][dest][svc].priv = priv; + mutex_unlock(&apr_svc_ch[dl][dest][svc].m_lock); + + return &apr_svc_ch[dl][dest][svc]; +} + +int apr_tal_close(struct apr_svc_ch_dev *apr_ch) +{ + int r; + + if (!apr_ch->ch) + return -EINVAL; + + mutex_lock(&apr_ch->m_lock); + r = smd_close(apr_ch->ch); + apr_ch->ch = NULL; + apr_ch->func = NULL; + apr_ch->priv = NULL; + mutex_unlock(&apr_ch->m_lock); + return r; +} + +static int apr_smd_probe(struct platform_device *pdev) +{ + int dest; + int clnt; + + if (pdev->id == APR_DEST_MODEM) { + pr_aud_info("apr_tal:Modem Is Up\n"); + dest = APR_DEST_MODEM; + clnt = APR_CLIENT_VOICE; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else if (pdev->id == APR_DEST_QDSP6) { + pr_aud_info("apr_tal:Q6 Is Up\n"); + /* + local_src_disable(PLL_4); + */ + dest = APR_DEST_QDSP6; + clnt = APR_CLIENT_AUDIO; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else + pr_aud_err("apr_tal:Invalid Dest Id: %d\n", pdev->id); + return 0; +} + +static struct platform_driver apr_q6_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_audio_svc", + .owner = THIS_MODULE, + }, +}; + +static struct platform_driver apr_modem_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_voice_svc", + .owner = THIS_MODULE, + }, +}; + +static int __init apr_tal_init(void) +{ + int i, j, k; + + for (i = 0; i < APR_DL_MAX; i++) + for (j = 0; j < APR_DEST_MAX; j++) + for (k = 0; k < APR_CLIENT_MAX; k++) { + init_waitqueue_head(&apr_svc_ch[i][j][k].wait); + init_waitqueue_head(&apr_svc_ch[i][j][k].dest); + spin_lock_init(&apr_svc_ch[i][j][k].lock); + spin_lock_init(&apr_svc_ch[i][j][k].w_lock); + mutex_init(&apr_svc_ch[i][j][k].m_lock); + } + platform_driver_register(&apr_q6_driver); + platform_driver_register(&apr_modem_driver); + return 0; +} +device_initcall(apr_tal_init); diff --git a/arch/arm/mach-msm/qdsp6v3/apr_tal.h b/arch/arm/mach-msm/qdsp6v3/apr_tal.h new file mode 100644 index 00000000000..88f8ba459ea --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/apr_tal.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __APR_TAL_H_ +#define __APR_TAL_H_ + +#include +#include +#include + +/* APR Client IDs */ +#define APR_CLIENT_AUDIO 0x0 +#define APR_CLIENT_VOICE 0x1 +#define APR_CLIENT_MAX 0x2 + +#define APR_DL_SMD 0 +#define APR_DL_MAX 1 + +#define APR_DEST_MODEM 0 +#define APR_DEST_QDSP6 1 +#define APR_DEST_MAX 2 + +#define APR_MAX_BUF 8192 + +#define APR_OPEN_TIMEOUT_MS 5000 + +typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv); +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv); +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len); +int apr_tal_close(struct apr_svc_ch_dev *apr_ch); +struct apr_svc_ch_dev { + struct smd_channel *ch; + spinlock_t lock; + spinlock_t w_lock; + struct mutex m_lock; + apr_svc_cb_fn func; + char data[APR_MAX_BUF]; + wait_queue_head_t wait; + void *priv; + uint32_t smd_state; + wait_queue_head_t dest; + uint32_t dest_state; +}; + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_aac.c b/arch/arm/mach-msm/qdsp6v3/audio_aac.c new file mode 100644 index 00000000000..88f8ba459ea --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_aac.c @@ -0,0 +1,71 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef __APR_TAL_H_ +#define __APR_TAL_H_ + +#include +#include +#include + +/* APR Client IDs */ +#define APR_CLIENT_AUDIO 0x0 +#define APR_CLIENT_VOICE 0x1 +#define APR_CLIENT_MAX 0x2 + +#define APR_DL_SMD 0 +#define APR_DL_MAX 1 + +#define APR_DEST_MODEM 0 +#define APR_DEST_QDSP6 1 +#define APR_DEST_MAX 2 + +#define APR_MAX_BUF 8192 + +#define APR_OPEN_TIMEOUT_MS 5000 + +typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv); +struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest, + uint32_t dl, apr_svc_cb_fn func, void *priv); +int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len); +int apr_tal_close(struct apr_svc_ch_dev *apr_ch); +struct apr_svc_ch_dev { + struct smd_channel *ch; + spinlock_t lock; + spinlock_t w_lock; + struct mutex m_lock; + apr_svc_cb_fn func; + char data[APR_MAX_BUF]; + wait_queue_head_t wait; + void *priv; + uint32_t smd_state; + wait_queue_head_t dest; + uint32_t dest_state; +}; + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_acdb.c b/arch/arm/mach-msm/qdsp6v3/audio_acdb.c new file mode 100644 index 00000000000..dca0df60769 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_acdb.c @@ -0,0 +1,780 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" + + +#define MAX_NETWORKS 9 +#define NUM_ACTIVE_NETWORKS 6 +#define VOCPROC_STREAM_OFFSET NUM_ACTIVE_NETWORKS +#define VOCPROC_VOL_OFFSET (NUM_ACTIVE_NETWORKS * 2) +#define NUM_VOCPROC_CAL_TYPES (NUM_ACTIVE_NETWORKS * 3) +#define NUM_AUDPROC_CAL_TYPES 3 +#define ACDB_BLOCK_SIZE 4096 +#define NUM_VOCPROC_BLOCKS 18 + +enum { + RX_CAL, + TX_CAL, + MAX_AUDPROC_TYPES +}; + +struct acdb_data { + struct mutex acdb_mutex; + + /* ANC Cal */ + struct acdb_cal_block anc_cal; + + /* AudProc Cal */ + struct acdb_cal_block audproc_cal[MAX_AUDPROC_TYPES]; + struct acdb_cal_block audstrm_cal[MAX_AUDPROC_TYPES]; + struct acdb_cal_block audvol_cal[MAX_AUDPROC_TYPES]; + + /* VocProc Cal */ + struct acdb_cal_block vocproc_cal[MAX_NETWORKS]; + struct acdb_cal_block vocstrm_cal[MAX_NETWORKS]; + struct acdb_cal_block vocvol_cal[MAX_NETWORKS]; + uint32_t vocproc_cal_size; + uint32_t vocstrm_cal_size; + uint32_t vocvol_cal_size; + + /* Sidetone Cal */ + struct sidetone_cal sidetone_cal; + + /* PMEM information */ + int pmem_fd; + unsigned long paddr; + unsigned long kvaddr; + unsigned long pmem_len; + struct file *file; + +}; + +static struct acdb_data acdb_data; +static atomic_t usage_count; + +void get_anc_cal(struct acdb_cal_block *cal_block) +{ + pr_debug("%s\n", __func__); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.anc_cal.cal_kvaddr; + cal_block->cal_paddr = acdb_data.anc_cal.cal_paddr; + cal_block->cal_size = acdb_data.anc_cal.cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_anc_cal(struct cal_block *cal_block) +{ + pr_debug("%s,\n", __func__); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + acdb_data.anc_cal.cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.anc_cal.cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.anc_cal.cal_size = + cal_block->cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_audproc_buffer_data(struct audproc_buffer_data *cal_buffers) +{ + int i; + pr_debug("%s\n", __func__); + + if (cal_buffers == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + for (i = 0; i < NUM_AUDPROC_BUFFERS; i++) { + cal_buffers->phys_addr[i] = (uint32_t) + (acdb_data.paddr + + (NUM_VOCPROC_BLOCKS + i) * ACDB_BLOCK_SIZE); + cal_buffers->buf_size[i] = ACDB_BLOCK_SIZE; + } +done: + return; +} + +void store_audproc_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audproc_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audproc_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audproc_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void 
get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_aud_info("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audproc_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audproc_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audproc_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_audstrm_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audstrm_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audstrm_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audstrm_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audstrm_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audstrm_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audstrm_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_audvol_cal(int32_t path, struct cal_block *cal_block) +{ + pr_debug("%s, path = %d\n", __func__, path); + + mutex_lock(&acdb_data.acdb_mutex); + + if (cal_block->cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_block->cal_offset, + acdb_data.pmem_len); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + acdb_data.audvol_cal[path].cal_kvaddr = + cal_block->cal_offset + acdb_data.kvaddr; + acdb_data.audvol_cal[path].cal_paddr = + cal_block->cal_offset + acdb_data.paddr; + acdb_data.audvol_cal[path].cal_size = + cal_block->cal_size; + +done: + mutex_unlock(&acdb_data.acdb_mutex); + return; +} + +void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block) +{ + pr_aud_info("%s, path = %d\n", __func__, path); + + if (cal_block == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + if (path >= MAX_AUDPROC_TYPES) { + pr_aud_err("ACDB=> Bad path sent to %s, path: %d\n", + __func__, path); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_block->cal_kvaddr = acdb_data.audvol_cal[path].cal_kvaddr; + cal_block->cal_paddr = acdb_data.audvol_cal[path].cal_paddr; + cal_block->cal_size = acdb_data.audvol_cal[path].cal_size; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + + +void store_vocproc_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + 
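+	/*
+	 * At most MAX_NETWORKS calibration blocks are accepted.  Each
+	 * entry carries an offset into the registered PMEM region, which
+	 * is translated below into physical and kernel-virtual addresses;
+	 * entries whose offset lies beyond the mapped length are recorded
+	 * with a zero size instead of a bogus address.
+	 */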
pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocproc_cal[i].cal_size = 0; + } else { + acdb_data.vocproc_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocproc_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocproc_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocproc_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocproc_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocproc_cal_size; + cal_data->cal_blocks = &acdb_data.vocproc_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_vocstrm_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocstrm_cal[i].cal_size = 0; + } else { + acdb_data.vocstrm_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocstrm_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocstrm_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocstrm_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocstrm_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocstrm_cal_size; + cal_data->cal_blocks = &acdb_data.vocstrm_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_vocvol_cal(int32_t len, struct cal_block *cal_blocks) +{ + int i; + pr_debug("%s\n", __func__); + + if (len > MAX_NETWORKS) { + pr_aud_err("%s: Calibration sent for %d networks, only %d are " + "supported!\n", __func__, len, MAX_NETWORKS); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + for (i = 0; i < len; i++) { + if (cal_blocks[i].cal_offset > acdb_data.pmem_len) { + pr_aud_err("%s: offset %d is > pmem_len %ld\n", + __func__, cal_blocks[i].cal_offset, + acdb_data.pmem_len); + acdb_data.vocvol_cal[i].cal_size = 0; + } else { + acdb_data.vocvol_cal[i].cal_size = + cal_blocks[i].cal_size; + acdb_data.vocvol_cal[i].cal_paddr = + cal_blocks[i].cal_offset + + acdb_data.paddr; + acdb_data.vocvol_cal[i].cal_kvaddr = + cal_blocks[i].cal_offset + + acdb_data.kvaddr; + } + } + acdb_data.vocvol_cal_size = len; + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void get_vocvol_cal(struct acdb_cal_data *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + 
pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->num_cal_blocks = acdb_data.vocvol_cal_size; + cal_data->cal_blocks = &acdb_data.vocvol_cal[0]; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +void store_sidetone_cal(struct sidetone_cal *cal_data) +{ + pr_debug("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + + acdb_data.sidetone_cal.enable = cal_data->enable; + acdb_data.sidetone_cal.gain = cal_data->gain; + + mutex_unlock(&acdb_data.acdb_mutex); +} + + +void get_sidetone_cal(struct sidetone_cal *cal_data) +{ + pr_debug("%s\n", __func__); + + if (cal_data == NULL) { + pr_aud_err("ACDB=> NULL pointer sent to %s\n", __func__); + goto done; + } + + mutex_lock(&acdb_data.acdb_mutex); + + cal_data->enable = acdb_data.sidetone_cal.enable; + cal_data->gain = acdb_data.sidetone_cal.gain; + + mutex_unlock(&acdb_data.acdb_mutex); +done: + return; +} + +static int acdb_open(struct inode *inode, struct file *f) +{ + s32 result = 0; + pr_aud_info("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + pr_aud_info("%s: ACDB opened but PMEM allocated, using existing PMEM!\n", + __func__); + } + mutex_unlock(&acdb_data.acdb_mutex); + + atomic_inc(&usage_count); + return result; +} + +static int deregister_pmem(void) +{ + int result; + struct audproc_buffer_data buffer; + + get_audproc_buffer_data(&buffer); + + result = adm_memory_unmap_regions(buffer.phys_addr, + buffer.buf_size, NUM_AUDPROC_BUFFERS); + + if (result < 0) + pr_aud_err("Audcal unmap did not work!\n"); + + if (acdb_data.pmem_fd) { + put_pmem_file(acdb_data.file); + acdb_data.pmem_fd = 0; + } + return result; +} + +static int register_pmem(void) +{ + int result; + struct audproc_buffer_data buffer; + + result = get_pmem_file(acdb_data.pmem_fd, &acdb_data.paddr, + &acdb_data.kvaddr, &acdb_data.pmem_len, + &acdb_data.file); + if (result != 0) { + acdb_data.pmem_fd = 0; + pr_aud_err("%s: Could not register PMEM!!!\n", __func__); + goto done; + } + + pr_debug("AUDIO_REGISTER_PMEM done! 
paddr = 0x%lx, " + "kvaddr = 0x%lx, len = x%lx\n", acdb_data.paddr, + acdb_data.kvaddr, acdb_data.pmem_len); + get_audproc_buffer_data(&buffer); + result = adm_memory_map_regions(buffer.phys_addr, 0, + buffer.buf_size, + NUM_AUDPROC_BUFFERS); + if (result < 0) + pr_aud_err("Audcal mmap did not work!\n"); + goto done; + +done: + return result; +} +static long acdb_ioctl(struct file *f, + unsigned int cmd, unsigned long arg) +{ + s32 result = 0; + s32 audproc_path; + s32 size; + struct cal_block data[MAX_NETWORKS]; + pr_debug("%s\n", __func__); + + switch (cmd) { + case AUDIO_REGISTER_PMEM: + pr_debug("AUDIO_REGISTER_PMEM\n"); + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + deregister_pmem(); + pr_aud_info("Remove the existing PMEM\n"); + } + + if (copy_from_user(&acdb_data.pmem_fd, (void *)arg, + sizeof(acdb_data.pmem_fd))) + result = -EFAULT; + else + result = register_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + goto done; + + case AUDIO_DEREGISTER_PMEM: + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + mutex_lock(&acdb_data.acdb_mutex); + deregister_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + goto done; + } + + if (copy_from_user(&size, (void *) arg, sizeof(size))) { + + result = -EFAULT; + goto done; + } + + if (size <= 0) { + pr_aud_err("%s: Invalid size sent to driver: %d\n", + __func__, size); + result = -EFAULT; + goto done; + } + + if (copy_from_user(data, (void *)(arg + sizeof(size)), size)) { + + pr_aud_err("%s: fail to copy table size %d\n", __func__, size); + result = -EFAULT; + goto done; + } + + if (data == NULL) { + pr_aud_err("%s: NULL pointer sent to driver!\n", __func__); + result = -EFAULT; + goto done; + } + + switch (cmd) { + case AUDIO_SET_AUDPROC_TX_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audproc_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_RX_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audproc_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_TX_STREAM_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audstrm_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_RX_STREAM_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audstrm_cal(audproc_path, data); + break; + case AUDIO_SET_AUDPROC_TX_VOL_CAL: + audproc_path = TX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audvol_cal(audproc_path, data); + case AUDIO_SET_AUDPROC_RX_VOL_CAL: + audproc_path = RX_CAL; + if (size > sizeof(struct cal_block)) + pr_aud_err("%s: More Audproc Cal then expected, " + "size received: %d\n", __func__, size); + store_audvol_cal(audproc_path, data); + break; + case AUDIO_SET_VOCPROC_CAL: + store_vocproc_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_VOCPROC_STREAM_CAL: + store_vocstrm_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_VOCPROC_VOL_CAL: + store_vocvol_cal(size / sizeof(struct cal_block), data); + break; + case AUDIO_SET_SIDETONE_CAL: + if (size > sizeof(struct sidetone_cal)) + pr_aud_err("%s: More 
sidetone cal then expected, " + "size received: %d\n", __func__, size); + store_sidetone_cal((struct sidetone_cal *)data); + break; + case AUDIO_SET_ANC_CAL: + store_anc_cal(data); + break; + default: + pr_aud_err("ACDB=> ACDB ioctl not found!\n"); + } + +done: + return result; +} + +static int acdb_mmap(struct file *file, struct vm_area_struct *vma) +{ + int result = 0; + int size = vma->vm_end - vma->vm_start; + + pr_debug("%s\n", __func__); + + mutex_lock(&acdb_data.acdb_mutex); + if (acdb_data.pmem_fd) { + if (size <= acdb_data.pmem_len) { + vma->vm_page_prot = pgprot_noncached( + vma->vm_page_prot); + result = remap_pfn_range(vma, + vma->vm_start, + acdb_data.paddr >> PAGE_SHIFT, + size, + vma->vm_page_prot); + } else { + pr_aud_err("%s: Not enough PMEM memory!\n", __func__); + result = -ENOMEM; + } + } else { + pr_aud_err("%s: PMEM is not allocated, yet!\n", __func__); + result = -ENODEV; + } + mutex_unlock(&acdb_data.acdb_mutex); + + return result; +} + +static int acdb_release(struct inode *inode, struct file *f) +{ + s32 result = 0; + + atomic_dec(&usage_count); + atomic_read(&usage_count); + + pr_aud_info("%s: ref count %d!\n", __func__, + atomic_read(&usage_count)); + + if (atomic_read(&usage_count) >= 1) { + result = -EBUSY; + } else { + mutex_lock(&acdb_data.acdb_mutex); + result = deregister_pmem(); + mutex_unlock(&acdb_data.acdb_mutex); + } + + return result; +} + +static const struct file_operations acdb_fops = { + .owner = THIS_MODULE, + .open = acdb_open, + .release = acdb_release, + .unlocked_ioctl = acdb_ioctl, + .mmap = acdb_mmap, +}; + +struct miscdevice acdb_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_acdb", + .fops = &acdb_fops, +}; + +static int __init acdb_init(void) +{ + pr_aud_info("%s\n", __func__); + memset(&acdb_data, 0, sizeof(acdb_data)); + mutex_init(&acdb_data.acdb_mutex); + atomic_set(&usage_count, 0); + return misc_register(&acdb_misc); +} + +static void __exit acdb_exit(void) +{ +} + +module_init(acdb_init); +module_exit(acdb_exit); + +MODULE_DESCRIPTION("MSM 8x60 Audio ACDB driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_acdb.h b/arch/arm/mach-msm/qdsp6v3/audio_acdb.h new file mode 100644 index 00000000000..17f9a7c4624 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_acdb.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ +#ifndef _AUDIO_ACDB_H +#define _AUDIO_ACDB_H + +#include +#include "q6adm.h" + +#define NUM_AUDPROC_BUFFERS 6 + +struct acdb_cal_block { + uint32_t cal_size; + uint32_t cal_kvaddr; + uint32_t cal_paddr; +}; + +struct acdb_cal_data { + uint32_t num_cal_blocks; + struct acdb_cal_block *cal_blocks; +}; + +struct audproc_buffer_data { + uint32_t buf_size[NUM_AUDPROC_BUFFERS]; + uint32_t phys_addr[NUM_AUDPROC_BUFFERS]; +}; + +void get_audproc_buffer_data(struct audproc_buffer_data *cal_buffers); +void get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block); +void get_vocproc_cal(struct acdb_cal_data *cal_data); +void get_vocstrm_cal(struct acdb_cal_data *cal_data); +void get_vocvol_cal(struct acdb_cal_data *cal_data); +void get_sidetone_cal(struct sidetone_cal *cal_data); +void get_anc_cal(struct acdb_cal_block *cal_block); + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c b/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c new file mode 100644 index 00000000000..f170ae52131 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_dev_ctl.c @@ -0,0 +1,1757 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "q6adm.h" +#include "rtac.h" + +#ifndef MAX +#define MAX(x, y) (((x) > (y)) ? 
(x) : (y)) +#endif + + +static DEFINE_MUTEX(session_lock); + +struct audio_dev_ctrl_state { + struct msm_snddev_info *devs[AUDIO_DEV_CTL_MAX_DEV]; + u32 num_dev; + atomic_t opened; + struct msm_snddev_info *voice_rx_dev; + struct msm_snddev_info *voice_tx_dev; + wait_queue_head_t wait; +}; + +static struct audio_dev_ctrl_state audio_dev_ctrl; +struct event_listner event; + +#define PLAYBACK 0x1 +#define LIVE_RECORDING 0x2 +#define NON_LIVE_RECORDING 0x3 +#define MAX_COPP_DEVICES 4 +static int voc_rx_freq = 0; +static int voc_tx_freq = 0; + +struct session_freq { + int freq; + int evt; +}; + +struct audio_routing_info { + unsigned short mixer_mask[MAX_SESSIONS]; + unsigned short audrec_mixer_mask[MAX_SESSIONS]; + struct session_freq dec_freq[MAX_SESSIONS]; + struct session_freq enc_freq[MAX_SESSIONS]; + unsigned int copp_list[MAX_SESSIONS][AFE_MAX_PORTS]; + int voice_tx_dev_id; + int voice_rx_dev_id; + int voice_tx_sample_rate; + int voice_rx_sample_rate; + signed int voice_tx_vol; + signed int voice_rx_vol; + int tx_mute; + int rx_mute; + int voice_state; + int call_state; + struct mutex copp_list_mutex; + struct mutex adm_mutex; +}; + +static struct audio_routing_info routing_info; + +struct audio_copp_topology { + struct mutex lock; + int session_cnt; + int session_id[MAX_SESSIONS]; + int topolog_id[MAX_SESSIONS]; +}; +static struct audio_copp_topology adm_tx_topology_tbl; + +static struct dev_ctrl_ops default_ctrl_ops; +static struct dev_ctrl_ops *ctrl_ops = &default_ctrl_ops; + +void htc_8x60_register_dev_ctrl_ops(struct dev_ctrl_ops *ops) +{ + ctrl_ops = ops; +} + +int msm_reset_all_device(void) +{ + int rc = 0; + int dev_id = 0; + struct msm_snddev_info *dev_info = NULL; + + for (dev_id = 0; dev_id < audio_dev_ctrl.num_dev; dev_id++) { + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) { + pr_aud_err("%s:pass invalid dev_id\n", __func__); + rc = PTR_ERR(dev_info); + return rc; + } + if (!dev_info->opened) + continue; + pr_debug("%s:Resetting device %d active on COPP %d" + "with %lld as routing\n", __func__, + dev_id, dev_info->copp_id, dev_info->sessions); + broadcast_event(AUDDEV_EVT_REL_PENDING, + dev_id, + SESSION_IGNORE); + rc = dev_info->dev_ops.close(dev_info); + if (rc < 0) { + pr_aud_err("%s:Snd device failed close!\n", __func__); + return rc; + } else { + dev_info->opened = 0; + broadcast_event(AUDDEV_EVT_DEV_RLS, + dev_id, + SESSION_IGNORE); + + if (dev_info->copp_id == VOICE_PLAYBACK_TX) + voice_start_playback(0); + } + dev_info->sessions = 0; + } + msm_clear_all_session(); + return 0; +} +EXPORT_SYMBOL(msm_reset_all_device); + +int msm_set_copp_id(int session_id, int copp_id) +{ + int rc = 0; + int index; + + if (session_id < 1 || session_id > 8) + return -EINVAL; + if (afe_validate_port(copp_id) < 0) + return -EINVAL; + + index = afe_get_port_index(copp_id); + pr_debug("%s: session[%d] copp_id[%d] index[%d]\n", __func__, + session_id, copp_id, index); + mutex_lock(&routing_info.copp_list_mutex); + if (routing_info.copp_list[session_id][index] == DEVICE_IGNORE) + routing_info.copp_list[session_id][index] = copp_id; + mutex_unlock(&routing_info.copp_list_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_set_copp_id); + +int msm_clear_copp_id(int session_id, int copp_id) +{ + int rc = 0; + int index = afe_get_port_index(copp_id); + + if (session_id < 1 || session_id > 8) { + pr_aud_err("%s: invalid session_id %d\n", __func__, session_id); + return -EINVAL; + } + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid copp_id index %d\n", 
__func__, index); + return -EINVAL; + } + + pr_debug("%s: session[%d] copp_id[%d] index[%d]\n", __func__, + session_id, copp_id, index); + mutex_lock(&routing_info.copp_list_mutex); + if (routing_info.copp_list[session_id][index] == copp_id) + routing_info.copp_list[session_id][index] = DEVICE_IGNORE; + mutex_unlock(&routing_info.copp_list_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_clear_copp_id); + +int msm_clear_session_id(int session_id) +{ + int rc = 0; + int i = 0; + if (session_id < 1 || session_id > 8) + return -EINVAL; + pr_debug("%s: session[%d]\n", __func__, session_id); + mutex_lock(&routing_info.adm_mutex); + mutex_lock(&routing_info.copp_list_mutex); + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[session_id][i] != DEVICE_IGNORE) { + rc = adm_close(routing_info.copp_list[session_id][i]); + if (rc < 0) { + pr_aud_err("%s: adm close fail port[%d] rc[%d]\n", + __func__, + routing_info.copp_list[session_id][i], + rc); + continue; + } + routing_info.copp_list[session_id][i] = DEVICE_IGNORE; + rc = 0; + } + } + mutex_unlock(&routing_info.copp_list_mutex); + mutex_unlock(&routing_info.adm_mutex); + + return rc; +} +EXPORT_SYMBOL(msm_clear_session_id); + +int msm_clear_all_session() +{ + int rc = 0; + int i = 0, j = 0; + pr_aud_info("%s:\n", __func__); + mutex_lock(&routing_info.adm_mutex); + mutex_lock(&routing_info.copp_list_mutex); + for (j = 1; j < MAX_SESSIONS; j++) { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[j][i] != DEVICE_IGNORE) { + rc = adm_close( + routing_info.copp_list[j][i]); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "session[%d] rc[%d]\n", + __func__, + routing_info.copp_list[j][i], + j, rc); + continue; + } + routing_info.copp_list[j][i] = DEVICE_IGNORE; + rc = 0; + } + } + } + mutex_unlock(&routing_info.copp_list_mutex); + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_clear_all_session); + +int msm_get_voice_state(void) +{ + pr_debug("voice state %d\n", routing_info.voice_state); + return routing_info.voice_state; +} +EXPORT_SYMBOL(msm_get_voice_state); + +int msm_get_call_state(void) +{ + pr_debug("call state %d\n", routing_info.call_state); + return routing_info.call_state; +} +EXPORT_SYMBOL(msm_get_call_state); + +int msm_set_voice_mute(int dir, int mute) +{ + pr_debug("dir %x mute %x\n", dir, mute); + if (dir == DIR_TX) { + routing_info.tx_mute = mute; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_tx_dev_id, SESSION_IGNORE); + } else + return -EPERM; + return 0; +} +EXPORT_SYMBOL(msm_set_voice_mute); + +int msm_set_voice_vol(int dir, s32 volume) +{ + if (dir == DIR_TX) { + routing_info.voice_tx_vol = volume; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_tx_dev_id, + SESSION_IGNORE); + } else if (dir == DIR_RX) { + routing_info.voice_rx_vol = volume; + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, + routing_info.voice_rx_dev_id, + SESSION_IGNORE); + } else + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(msm_set_voice_vol); + +void msm_snddev_register(struct msm_snddev_info *dev_info) +{ + mutex_lock(&session_lock); + if (audio_dev_ctrl.num_dev < AUDIO_DEV_CTL_MAX_DEV) { + audio_dev_ctrl.devs[audio_dev_ctrl.num_dev] = dev_info; + dev_info->dev_volume = 50; /* 50% */ + dev_info->sessions = 0x0; + dev_info->usage_count = 0; + audio_dev_ctrl.num_dev++; + } else + pr_aud_err("%s: device registry max out\n", __func__); + mutex_unlock(&session_lock); +} +EXPORT_SYMBOL(msm_snddev_register); + +int msm_snddev_devcount(void) +{ 
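+	/* Number of devices registered so far via msm_snddev_register(). */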
+ return audio_dev_ctrl.num_dev; +} +EXPORT_SYMBOL(msm_snddev_devcount); + +int msm_snddev_query(int dev_id) +{ + if (dev_id <= audio_dev_ctrl.num_dev) + return 0; + return -ENODEV; +} +EXPORT_SYMBOL(msm_snddev_query); + +int msm_snddev_is_set(int popp_id, int copp_id) +{ + return routing_info.mixer_mask[popp_id] & (0x1 << copp_id); +} +EXPORT_SYMBOL(msm_snddev_is_set); + +unsigned short msm_snddev_route_enc(int enc_id) +{ + if (enc_id >= MAX_SESSIONS) + return -EINVAL; + return routing_info.audrec_mixer_mask[enc_id]; +} +EXPORT_SYMBOL(msm_snddev_route_enc); + +unsigned short msm_snddev_route_dec(int popp_id) +{ + if (popp_id >= MAX_SESSIONS) + return -EINVAL; + return routing_info.mixer_mask[popp_id]; +} +EXPORT_SYMBOL(msm_snddev_route_dec); + +/*To check one->many case*/ +int msm_check_multicopp_per_stream(int session_id, + struct route_payload *payload) +{ + int i = 0; + int flag = 0; + pr_debug("%s: session_id=%d\n", __func__, session_id); + mutex_lock(&routing_info.copp_list_mutex); + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[session_id][i] == DEVICE_IGNORE) + continue; + else { + pr_debug("Device enabled port_id = %d\n", + routing_info.copp_list[session_id][i]); + payload->copp_ids[flag++] = + routing_info.copp_list[session_id][i]; + } + } + mutex_unlock(&routing_info.copp_list_mutex); + if (flag > 1) { + pr_debug("Multiple copp per stream case num_copps=%d\n", flag); + } else { + pr_debug("Stream routed to single copp\n"); + } + payload->num_copps = flag; + return flag; +} + +int msm_snddev_set_dec(int popp_id, int copp_id, int set, + int rate, int mode) +{ + int rc = 0, i = 0; + struct route_payload payload; + int topology = DEFAULT_COPP_TOPOLOGY; + + if ((popp_id >= MAX_SESSIONS) || (popp_id <= 0)) { + pr_aud_err("%s: Invalid session id %d\n", __func__, popp_id); + return 0; + } + + mutex_lock(&routing_info.adm_mutex); + if (set) { + if (ctrl_ops->support_opendsp) { + if (ctrl_ops->support_opendsp()) + topology = HTC_COPP_TOPOLOGY; + } + pr_aud_info("%s, topology = 0x%x\n", __func__, topology); + rc = adm_open(copp_id, PLAYBACK, rate, mode, + topology); + if (rc < 0) { + pr_aud_err("%s: adm open fail rc[%d]\n", __func__, rc); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + msm_set_copp_id(popp_id, copp_id); + pr_debug("%s:Session id=%d copp_id=%d\n", + __func__, popp_id, copp_id); + memset(payload.copp_ids, DEVICE_IGNORE, + (sizeof(unsigned int) * AFE_MAX_PORTS)); + rc = msm_check_multicopp_per_stream(popp_id, &payload); + /* Multiple streams per copp is handled, one stream at a time */ + rc = adm_matrix_map(popp_id, PLAYBACK, rc, + payload.copp_ids, copp_id); + if (rc < 0) { + pr_aud_err("%s: matrix map failed rc[%d]\n", + __func__, rc); + adm_close(copp_id); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + } else { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[popp_id][i] == copp_id) { + rc = adm_close(copp_id); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "rc[%d]\n", + __func__, copp_id, rc); + rc = -EINVAL; + mutex_unlock(&routing_info.adm_mutex); + return rc; + } + msm_clear_copp_id(popp_id, copp_id); + break; + } + } + } + + if (copp_id == VOICE_PLAYBACK_TX) { + /* Signal uplink playback. 
*/ + rc = voice_start_playback(set); + } + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_snddev_set_dec); + + +static int check_tx_copp_topology(int session_id) +{ + int cnt; + int ret_val = -ENOENT; + + cnt = adm_tx_topology_tbl.session_cnt; + if (cnt) { + do { + if (adm_tx_topology_tbl.session_id[cnt-1] + == session_id) + ret_val = cnt-1; + } while (--cnt); + } + + return ret_val; +} + +static int add_to_tx_topology_lists(int session_id, int topology) +{ + int idx = 0, tbl_idx; + int ret_val = -ENOSPC; + + mutex_lock(&adm_tx_topology_tbl.lock); + + tbl_idx = check_tx_copp_topology(session_id); + if (tbl_idx == -ENOENT) { + while (adm_tx_topology_tbl.session_id[idx++]) + ; + tbl_idx = idx-1; + } + + if (tbl_idx < MAX_SESSIONS) { + adm_tx_topology_tbl.session_id[tbl_idx] = session_id; + adm_tx_topology_tbl.topolog_id[tbl_idx] = topology; + adm_tx_topology_tbl.session_cnt++; + + ret_val = 0; + } + mutex_unlock(&adm_tx_topology_tbl.lock); + return ret_val; +} + +static void remove_from_tx_topology_lists(int session_id) +{ + int tbl_idx; + + mutex_lock(&adm_tx_topology_tbl.lock); + tbl_idx = check_tx_copp_topology(session_id); + if (tbl_idx != -ENOENT) { + + adm_tx_topology_tbl.session_cnt--; + adm_tx_topology_tbl.session_id[tbl_idx] = 0; + adm_tx_topology_tbl.topolog_id[tbl_idx] = 0; + } + mutex_unlock(&adm_tx_topology_tbl.lock); +} + +int auddev_cfg_tx_copp_topology(int session_id, int cfg) +{ + int ret = 0; + + if (cfg == DEFAULT_COPP_TOPOLOGY) + remove_from_tx_topology_lists(session_id); + else { + switch (cfg) { + case VPM_TX_SM_ECNS_COPP_TOPOLOGY: + case VPM_TX_DM_FLUENCE_COPP_TOPOLOGY: + case HTC_STEREO_RECORD_TOPOLOGY: + ret = add_to_tx_topology_lists(session_id, cfg); + break; + + default: + ret = -ENODEV; + break; + } + } + return ret; +} + +int msm_snddev_set_enc(int popp_id, int copp_id, int set, + int rate, int mode) +{ + int topology; + int tbl_idx; + int rc = 0, i = 0; + mutex_lock(&routing_info.adm_mutex); + if (set) { + mutex_lock(&adm_tx_topology_tbl.lock); + tbl_idx = check_tx_copp_topology(popp_id); + if (tbl_idx == -ENOENT) + topology = DEFAULT_COPP_TOPOLOGY; + else { + topology = adm_tx_topology_tbl.topolog_id[tbl_idx]; + rate = 16000; + } + mutex_unlock(&adm_tx_topology_tbl.lock); + pr_aud_info("%s, topology = 0x%x\n", __func__, topology); + rc = adm_open(copp_id, LIVE_RECORDING, rate, mode, topology); + if (rc < 0) { + pr_aud_err("%s: adm open fail rc[%d]\n", __func__, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + (unsigned int *)&copp_id, copp_id); + if (rc < 0) { + pr_aud_err("%s: matrix map failed rc[%d]\n", __func__, rc); + adm_close(copp_id); + rc = -EINVAL; + goto fail_cmd; + } + msm_set_copp_id(popp_id, copp_id); + } else { + for (i = 0; i < AFE_MAX_PORTS; i++) { + if (routing_info.copp_list[popp_id][i] == copp_id) { + rc = adm_close(copp_id); + if (rc < 0) { + pr_aud_err("%s: adm close fail copp[%d]" + "rc[%d]\n", + __func__, copp_id, rc); + rc = -EINVAL; + goto fail_cmd; + } + msm_clear_copp_id(popp_id, copp_id); + break; + } + } + } +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} +EXPORT_SYMBOL(msm_snddev_set_enc); + +int msm_device_is_voice(int dev_id) +{ + if ((dev_id == routing_info.voice_rx_dev_id) + || (dev_id == routing_info.voice_tx_dev_id)) + return 0; + else + return -EINVAL; +} +EXPORT_SYMBOL(msm_device_is_voice); + +int msm_set_voc_route(struct msm_snddev_info *dev_info, + int stream_type, int dev_id) +{ + int rc = 0; + u64 session_mask = 0; + + if 
(dev_info == NULL) { + pr_aud_err("%s: invalid param\n", __func__); + return -EINVAL; + } + + mutex_lock(&session_lock); + switch (stream_type) { + case AUDIO_ROUTE_STREAM_VOICE_RX: + if (audio_dev_ctrl.voice_rx_dev) + audio_dev_ctrl.voice_rx_dev->sessions &= ~0xFFFF; + + if (!(dev_info->capability & SNDDEV_CAP_RX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + audio_dev_ctrl.voice_rx_dev = dev_info; + if (audio_dev_ctrl.voice_rx_dev) { + session_mask = + ((u64)0x1) << (MAX_BIT_PER_CLIENT * \ + ((int)AUDDEV_CLNT_VOC-1)); + audio_dev_ctrl.voice_rx_dev->sessions |= + session_mask; + } + routing_info.voice_rx_dev_id = dev_id; + break; + case AUDIO_ROUTE_STREAM_VOICE_TX: + if (audio_dev_ctrl.voice_tx_dev) + audio_dev_ctrl.voice_tx_dev->sessions &= ~0xFFFF; + + if (!(dev_info->capability & SNDDEV_CAP_TX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + + audio_dev_ctrl.voice_tx_dev = dev_info; + if (audio_dev_ctrl.voice_rx_dev) { + session_mask = + ((u64)0x1) << (MAX_BIT_PER_CLIENT * \ + ((int)AUDDEV_CLNT_VOC-1)); + audio_dev_ctrl.voice_tx_dev->sessions |= + session_mask; + } + routing_info.voice_tx_dev_id = dev_id; + break; + default: + rc = -EINVAL; + } + mutex_unlock(&session_lock); + return rc; +} +EXPORT_SYMBOL(msm_set_voc_route); + +void msm_release_voc_thread(void) +{ + wake_up(&audio_dev_ctrl.wait); +} +EXPORT_SYMBOL(msm_release_voc_thread); + +int msm_snddev_get_enc_freq(session_id) +{ + return routing_info.enc_freq[session_id].freq; +} +EXPORT_SYMBOL(msm_snddev_get_enc_freq); + +int msm_get_voc_freq(int *tx_freq, int *rx_freq) +{ + *tx_freq = (0 == voc_tx_freq ? routing_info.voice_tx_sample_rate + : voc_tx_freq); + *rx_freq = (0 == voc_rx_freq ? routing_info.voice_rx_sample_rate + : voc_rx_freq); + return 0; +} +EXPORT_SYMBOL(msm_get_voc_freq); + +void msm_set_voc_freq(int tx_freq, int rx_freq) +{ + voc_tx_freq = tx_freq; + voc_rx_freq = rx_freq; +} +EXPORT_SYMBOL(msm_set_voc_freq); + + +int msm_get_voc_route(u32 *rx_id, u32 *tx_id) +{ + int rc = 0; + + if (!rx_id || !tx_id) + return -EINVAL; + + mutex_lock(&session_lock); + if (!audio_dev_ctrl.voice_rx_dev || !audio_dev_ctrl.voice_tx_dev) { + rc = -ENODEV; + mutex_unlock(&session_lock); + return rc; + } + + *rx_id = audio_dev_ctrl.voice_rx_dev->acdb_id; + *tx_id = audio_dev_ctrl.voice_tx_dev->acdb_id; + + mutex_unlock(&session_lock); + + return rc; +} +EXPORT_SYMBOL(msm_get_voc_route); + +struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id) +{ + struct msm_snddev_info *info; + + if ((audio_dev_ctrl.num_dev - 1) < dev_id) { + info = ERR_PTR(-ENODEV); + goto error; + } + + info = audio_dev_ctrl.devs[dev_id]; +error: + return info; + +} +EXPORT_SYMBOL(audio_dev_ctrl_find_dev); + +int snddev_voice_set_volume(int vol, int path) +{ + if (audio_dev_ctrl.voice_rx_dev + && audio_dev_ctrl.voice_tx_dev) { + if (path) + audio_dev_ctrl.voice_tx_dev->dev_volume = vol; + else + audio_dev_ctrl.voice_rx_dev->dev_volume = vol; + } else + return -ENODEV; + return 0; +} +EXPORT_SYMBOL(snddev_voice_set_volume); + +static int audio_dev_ctrl_get_devices(struct audio_dev_ctrl_state *dev_ctrl, + void __user *arg) +{ + int rc = 0; + u32 index; + struct msm_snd_device_list work_list; + struct msm_snd_device_info *work_tbl; + + if (copy_from_user(&work_list, arg, sizeof(work_list))) { + rc = -EFAULT; + goto error; + } + + if (work_list.num_dev > dev_ctrl->num_dev) { + rc = -EINVAL; + goto error; + } + + work_tbl = kmalloc(work_list.num_dev * + sizeof(struct msm_snd_device_info), 
GFP_KERNEL); + if (!work_tbl) { + rc = -ENOMEM; + goto error; + } + + for (index = 0; index < dev_ctrl->num_dev; index++) { + work_tbl[index].dev_id = index; + work_tbl[index].dev_cap = dev_ctrl->devs[index]->capability; + strlcpy(work_tbl[index].dev_name, dev_ctrl->devs[index]->name, + 64); + } + + if (copy_to_user((void *) (work_list.list), work_tbl, + work_list.num_dev * sizeof(struct msm_snd_device_info))) + rc = -EFAULT; + kfree(work_tbl); +error: + return rc; +} + + +int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id, + void (*listner)(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data), + void *private_data) +{ + int rc; + struct msm_snd_evt_listner *callback = NULL; + struct msm_snd_evt_listner *new_cb; + + new_cb = kzalloc(sizeof(struct msm_snd_evt_listner), GFP_KERNEL); + if (!new_cb) { + pr_aud_err("No memory to add new listener node\n"); + return -ENOMEM; + } + + mutex_lock(&session_lock); + new_cb->cb_next = NULL; + new_cb->auddev_evt_listener = listner; + new_cb->evt_id = evt_id; + new_cb->clnt_type = clnt_type; + new_cb->clnt_id = clnt_id; + new_cb->private_data = private_data; + if (event.cb == NULL) { + event.cb = new_cb; + new_cb->cb_prev = NULL; + } else { + callback = event.cb; + for (; ;) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + callback->cb_next = new_cb; + new_cb->cb_prev = callback; + } + event.num_listner++; + mutex_unlock(&session_lock); + rc = 0; + return rc; +} +EXPORT_SYMBOL(auddev_register_evt_listner); + +int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id) +{ + struct msm_snd_evt_listner *callback = event.cb; + struct msm_snddev_info *info; + u64 session_mask = 0; + int i = 0; + + mutex_lock(&session_lock); + while (callback != NULL) { + if ((callback->clnt_type == clnt_type) + && (callback->clnt_id == clnt_id)) + break; + callback = callback->cb_next; + } + if (callback == NULL) { + mutex_unlock(&session_lock); + return -EINVAL; + } + + if ((callback->cb_next == NULL) && (callback->cb_prev == NULL)) + event.cb = NULL; + else if (callback->cb_next == NULL) + callback->cb_prev->cb_next = NULL; + else if (callback->cb_prev == NULL) { + callback->cb_next->cb_prev = NULL; + event.cb = callback->cb_next; + } else { + callback->cb_prev->cb_next = callback->cb_next; + callback->cb_next->cb_prev = callback->cb_prev; + } + kfree(callback); + + session_mask = (((u64)0x1) << clnt_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + info->sessions &= ~session_mask; + } + mutex_unlock(&session_lock); + return 0; +} +EXPORT_SYMBOL(auddev_unregister_evt_listner); + +int msm_snddev_withdraw_freq(u32 session_id, u32 capability, u32 clnt_type) +{ + int i = 0; + struct msm_snddev_info *info; + u64 session_mask = 0; + + if ((clnt_type == AUDDEV_CLNT_VOC) && (session_id != 0)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_DEC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_ENC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + + session_mask = (((u64)0x1) << session_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + if ((info->sessions & session_mask) + && (info->capability & capability)) { + if (!(info->sessions & ~(session_mask))) + info->set_sample_rate = 0; + } + } + if (clnt_type == AUDDEV_CLNT_DEC) + routing_info.dec_freq[session_id].freq + = 
0; + else if (clnt_type == AUDDEV_CLNT_ENC) + routing_info.enc_freq[session_id].freq + = 0; + else if (capability == SNDDEV_CAP_TX) + routing_info.voice_tx_sample_rate = 0; + else + routing_info.voice_rx_sample_rate = 48000; + return 0; +} + +int msm_snddev_request_freq(int *freq, u32 session_id, + u32 capability, u32 clnt_type) +{ + int i = 0; + int rc = 0; + struct msm_snddev_info *info; + u32 set_freq; + u64 session_mask = 0; + u64 clnt_type_mask = 0; + + pr_debug(": clnt_type 0x%08x\n", clnt_type); + + if ((clnt_type == AUDDEV_CLNT_VOC) && (session_id != 0)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_DEC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + if ((clnt_type == AUDDEV_CLNT_ENC) + && (session_id >= MAX_SESSIONS)) + return -EINVAL; + session_mask = (((u64)0x1) << session_id) << (MAX_BIT_PER_CLIENT * \ + ((int)clnt_type-1)); + clnt_type_mask = (0xFFFF << (MAX_BIT_PER_CLIENT * (clnt_type-1))); + if (!(*freq == 8000) && !(*freq == 11025) && + !(*freq == 12000) && !(*freq == 16000) && + !(*freq == 22050) && !(*freq == 24000) && + !(*freq == 32000) && !(*freq == 44100) && + !(*freq == 48000)) + return -EINVAL; + + for (i = 0; i < audio_dev_ctrl.num_dev; i++) { + info = audio_dev_ctrl.devs[i]; + if ((info->sessions & session_mask) + && (info->capability & capability)) { + rc = 0; + if ((info->sessions & ~clnt_type_mask) + && ((*freq != 8000) && (*freq != 16000) + && (*freq != 48000))) { + if (clnt_type == AUDDEV_CLNT_ENC) { + routing_info.enc_freq[session_id].freq + = 0; + return -EPERM; + } else if (clnt_type == AUDDEV_CLNT_DEC) { + routing_info.dec_freq[session_id].freq + = 0; + return -EPERM; + } + } + if (*freq == info->set_sample_rate) { + rc = info->set_sample_rate; + continue; + } + set_freq = MAX(*freq, info->set_sample_rate); + + + if (clnt_type == AUDDEV_CLNT_DEC) { + routing_info.dec_freq[session_id].evt = 1; + routing_info.dec_freq[session_id].freq + = set_freq; + } else if (clnt_type == AUDDEV_CLNT_ENC) { + routing_info.enc_freq[session_id].evt = 1; + routing_info.enc_freq[session_id].freq + = set_freq; + } else if (capability == SNDDEV_CAP_TX) + routing_info.voice_tx_sample_rate = set_freq; + + rc = set_freq; + info->set_sample_rate = set_freq; + *freq = info->set_sample_rate; + + if (info->opened) { + broadcast_event(AUDDEV_EVT_FREQ_CHG, i, + SESSION_IGNORE); + set_freq = info->dev_ops.set_freq(info, + set_freq); + broadcast_event(AUDDEV_EVT_DEV_RDY, i, + SESSION_IGNORE); + } + } + pr_debug("info->set_sample_rate = %d\n", info->set_sample_rate); + pr_debug("routing_info.enc_freq.freq = %d\n", + routing_info.enc_freq[session_id].freq); + } + return rc; +} +EXPORT_SYMBOL(msm_snddev_request_freq); + +int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain) +{ + int rc; + struct msm_snddev_info *dev_info; + + pr_debug("dev_id %d enable %d\n", dev_id, enable); + + dev_info = audio_dev_ctrl_find_dev(dev_id); + + if (IS_ERR(dev_info)) { + pr_aud_err("bad dev_id %d\n", dev_id); + rc = -EINVAL; + } else if (!dev_info->dev_ops.enable_sidetone) { + pr_debug("dev %d no sidetone support\n", dev_id); + rc = -EPERM; + } else + rc = dev_info->dev_ops.enable_sidetone(dev_info, enable, gain); + + return rc; +} +EXPORT_SYMBOL(msm_snddev_enable_sidetone); + +int msm_enable_incall_recording(int popp_id, int rec_mode, int rate, + int channel_mode) +{ + int rc = 0; + unsigned int port_id[2]; + port_id[0] = VOICE_RECORD_TX; + port_id[1] = VOICE_RECORD_RX; + + pr_debug("%s: popp_id %d, rec_mode %d, rate %d, channel_mode %d\n", + __func__, popp_id, rec_mode, rate, 
channel_mode); + + mutex_lock(&routing_info.adm_mutex); + + if (rec_mode == VOC_REC_UPLINK) { + rc = afe_start_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[0], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + &port_id[0], port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[0]); + + } else if (rec_mode == VOC_REC_DOWNLINK) { + rc = afe_start_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[1], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 1, + &port_id[1], port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[1]); + + } else if (rec_mode == VOC_REC_BOTH) { + rc = afe_start_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[0], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[0]); + + rc = afe_start_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port start\n", + __func__, rc); + + goto fail_cmd; + } + + rc = adm_open(port_id[1], LIVE_RECORDING, rate, channel_mode, + DEFAULT_COPP_TOPOLOGY); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM open %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + rc = adm_matrix_map(popp_id, LIVE_RECORDING, 2, + &port_id[0], port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM matrix map\n", + __func__, rc); + + goto fail_cmd; + } + + msm_set_copp_id(popp_id, port_id[1]); + } else { + pr_aud_err("%s Unknown rec_mode %d\n", __func__, rec_mode); + + goto fail_cmd; + } + + rc = voice_start_record(rec_mode, 1); + +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} + +int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode) +{ + int rc = 0; + uint32_t port_id[2]; + port_id[0] = VOICE_RECORD_TX; + port_id[1] = VOICE_RECORD_RX; + + pr_debug("%s: popp_id %d, rec_mode %d\n", __func__, popp_id, rec_mode); + + mutex_lock(&routing_info.adm_mutex); + + rc = voice_start_record(rec_mode, 0); + if (rc < 0) { + pr_aud_err("%s: Error %d stopping record\n", __func__, rc); + + goto fail_cmd; + } + + if (rec_mode == VOC_REC_UPLINK) { + rc = adm_close(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[0]); + + rc = afe_stop_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + + } else if (rec_mode == VOC_REC_DOWNLINK) { + rc = adm_close(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, 
port_id[1]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[1]); + + rc = afe_stop_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + } else if (rec_mode == VOC_REC_BOTH) { + rc = adm_close(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[0]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[0]); + + rc = afe_stop_pseudo_port(port_id[0]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Tx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + + rc = adm_close(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in ADM close %d\n", + __func__, rc, port_id[1]); + + goto fail_cmd; + } + + msm_clear_copp_id(popp_id, port_id[1]); + + rc = afe_stop_pseudo_port(port_id[1]); + if (rc < 0) { + pr_aud_err("%s: Error %d in Rx pseudo port stop\n", + __func__, rc); + goto fail_cmd; + } + } else { + pr_aud_err("%s Unknown rec_mode %d\n", __func__, rec_mode); + + goto fail_cmd; + } + +fail_cmd: + mutex_unlock(&routing_info.adm_mutex); + return rc; +} + +static long audio_dev_ctrl_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int rc = 0; + struct audio_dev_ctrl_state *dev_ctrl = file->private_data; + + mutex_lock(&session_lock); + switch (cmd) { + case AUDIO_GET_NUM_SND_DEVICE: + rc = put_user(dev_ctrl->num_dev, (uint32_t __user *) arg); + break; + case AUDIO_GET_SND_DEVICES: + rc = audio_dev_ctrl_get_devices(dev_ctrl, (void __user *) arg); + break; + case AUDIO_ENABLE_SND_DEVICE: { + struct msm_snddev_info *dev_info; + u32 dev_id; + + if (get_user(dev_id, (u32 __user *) arg)) { + rc = -EFAULT; + break; + } + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) + rc = PTR_ERR(dev_info); + else { + rc = dev_info->dev_ops.open(dev_info); + if (!rc) + dev_info->opened = 1; + wake_up(&audio_dev_ctrl.wait); + } + break; + + } + + case AUDIO_DISABLE_SND_DEVICE: { + struct msm_snddev_info *dev_info; + u32 dev_id; + + if (get_user(dev_id, (u32 __user *) arg)) { + rc = -EFAULT; + break; + } + dev_info = audio_dev_ctrl_find_dev(dev_id); + if (IS_ERR(dev_info)) + rc = PTR_ERR(dev_info); + else { + rc = dev_info->dev_ops.close(dev_info); + dev_info->opened = 0; + } + break; + } + + case AUDIO_ROUTE_STREAM: { + struct msm_audio_route_config route_cfg; + struct msm_snddev_info *dev_info; + + if (copy_from_user(&route_cfg, (void __user *) arg, + sizeof(struct msm_audio_route_config))) { + rc = -EFAULT; + break; + } + pr_debug("%s: route cfg %d %d type\n", __func__, + route_cfg.dev_id, route_cfg.stream_type); + dev_info = audio_dev_ctrl_find_dev(route_cfg.dev_id); + if (IS_ERR(dev_info)) { + pr_aud_err("%s: pass invalid dev_id\n", __func__); + rc = PTR_ERR(dev_info); + break; + } + + switch (route_cfg.stream_type) { + + case AUDIO_ROUTE_STREAM_VOICE_RX: + if (!(dev_info->capability & SNDDEV_CAP_RX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + dev_ctrl->voice_rx_dev = dev_info; + break; + case AUDIO_ROUTE_STREAM_VOICE_TX: + if (!(dev_info->capability & SNDDEV_CAP_TX) | + !(dev_info->capability & SNDDEV_CAP_VOICE)) { + rc = -EINVAL; + break; + } + dev_ctrl->voice_tx_dev = dev_info; + break; + } + break; + } + + default: + rc = -EINVAL; + } + mutex_unlock(&session_lock); + return rc; +} + + +static int audio_dev_ctrl_open(struct inode *inode, struct file *file) +{ + pr_debug("open audio_dev_ctrl\n"); + atomic_inc(&audio_dev_ctrl.opened); + file->private_data = &audio_dev_ctrl; + 
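+	/* Every opener shares the single global device-control state; the
+	 * ioctl handler below retrieves it from file->private_data. */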
return 0; +} + +static int audio_dev_ctrl_release(struct inode *inode, struct file *file) +{ + pr_debug("release audio_dev_ctrl\n"); + atomic_dec(&audio_dev_ctrl.opened); + return 0; +} + +static const struct file_operations audio_dev_ctrl_fops = { + .owner = THIS_MODULE, + .open = audio_dev_ctrl_open, + .release = audio_dev_ctrl_release, + .unlocked_ioctl = audio_dev_ctrl_ioctl, +}; + + +struct miscdevice audio_dev_ctrl_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_audio_dev_ctrl", + .fops = &audio_dev_ctrl_fops, +}; + +/* session id is 64 bit routing mask per device + * 0-15 for voice clients + * 16-31 for Decoder clients + * 32-47 for Encoder clients + * 48-63 Do not care + */ +void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id) +{ + int clnt_id = 0, i; + union auddev_evt_data *evt_payload = NULL; + struct msm_snd_evt_listner *callback; + struct msm_snddev_info *dev_info = NULL; + u64 session_mask = 0; + static int pending_sent; + + pr_debug(": evt_id = %d\n", evt_id); + + if ((evt_id != AUDDEV_EVT_START_VOICE) + && (evt_id != AUDDEV_EVT_END_VOICE) + && (evt_id != AUDDEV_EVT_STREAM_VOL_CHG) + && (evt_id != AUDDEV_EVT_VOICE_STATE_CHG)) + dev_info = audio_dev_ctrl_find_dev(dev_id); + +#ifdef CONFIG_MSM8X60_RTAC + update_rtac(evt_id, dev_id, dev_info); +#endif + + if (event.cb != NULL) + callback = event.cb; + else + return; + mutex_lock(&session_lock); + + if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + routing_info.voice_state = dev_id; + + evt_payload = kzalloc(sizeof(union auddev_evt_data), + GFP_KERNEL); + + if (evt_payload == NULL) { + pr_aud_err("%s: fail to allocate evt_payload", __func__); + return; + } + + for (; ;) { + if (!(evt_id & callback->evt_id)) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + clnt_id = callback->clnt_id; + memset(evt_payload, 0, sizeof(union auddev_evt_data)); + + if (evt_id == AUDDEV_EVT_START_VOICE) + routing_info.call_state = 1; + if (evt_id == AUDDEV_EVT_END_VOICE) + routing_info.call_state = 0; + + if ((evt_id == AUDDEV_EVT_START_VOICE) + || (evt_id == AUDDEV_EVT_END_VOICE)) + goto skip_check; + if (callback->clnt_type == AUDDEV_CLNT_AUDIOCAL) + goto aud_cal; + + session_mask = (((u64)0x1) << clnt_id) + << (MAX_BIT_PER_CLIENT * \ + ((int)callback->clnt_type-1)); + + if ((evt_id == AUDDEV_EVT_STREAM_VOL_CHG) || \ + (evt_id == AUDDEV_EVT_VOICE_STATE_CHG)) { + pr_debug("AUDDEV_EVT_STREAM_VOL_CHG or\ + AUDDEV_EVT_VOICE_STATE_CHG\n"); + goto volume_strm; + } + if (dev_info) + pr_debug("dev_info->sessions = %llu\n", dev_info->sessions); + else { + pr_aud_err("dev_info is NULL\n"); + break; + } + if ((!session_id && !(dev_info->sessions & session_mask)) || + (session_id && ((dev_info->sessions & session_mask) != + session_id))) { + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + if (evt_id == AUDDEV_EVT_DEV_CHG_VOICE) + goto voc_events; + +volume_strm: + if (callback->clnt_type == AUDDEV_CLNT_DEC) { + pr_debug("AUDDEV_CLNT_DEC\n"); + if (evt_id == AUDDEV_EVT_STREAM_VOL_CHG) { + pr_debug("clnt_id = %d, session_id = %llu\n", + clnt_id, session_id); + if (session_mask != session_id) + goto sent_dec; + else + evt_payload->session_vol = + msm_vol_ctl.volume; + } else if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.dec_freq[clnt_id].evt) { + routing_info.dec_freq[clnt_id].evt + = 0; + goto sent_dec; + } else if (routing_info.dec_freq[clnt_id].freq + == dev_info->set_sample_rate) + goto sent_dec; + else { + 
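+				/* Device sample rate changed under this decoder
+				 * session: report the new rate, device
+				 * capability and ACDB id in the payload. */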
evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else + evt_payload->routing_id = dev_info->copp_id; + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); +sent_dec: + if ((evt_id != AUDDEV_EVT_STREAM_VOL_CHG) && + (evt_id != AUDDEV_EVT_VOICE_STATE_CHG)) + routing_info.dec_freq[clnt_id].freq + = dev_info->set_sample_rate; + + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + if (callback->clnt_type == AUDDEV_CLNT_ENC) { + pr_debug("AUDDEV_CLNT_ENC\n"); + if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.enc_freq[clnt_id].evt) { + routing_info.enc_freq[clnt_id].evt + = 0; + goto sent_enc; + } else { + evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else { + if (dev_info) + evt_payload->routing_id = dev_info->copp_id; + else + pr_aud_info("dev_info == NULL\n"); + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); +sent_enc: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } +aud_cal: + if (callback->clnt_type == AUDDEV_CLNT_AUDIOCAL) { + pr_debug("AUDDEV_CLNT_AUDIOCAL\n"); + if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else if (!dev_info->sessions) + goto sent_aud_cal; + else { + evt_payload->audcal_info.dev_id = + dev_info->copp_id; + evt_payload->audcal_info.acdb_id = + dev_info->acdb_id; + evt_payload->audcal_info.dev_type = + (dev_info->capability & SNDDEV_CAP_TX) ? + SNDDEV_CAP_TX : SNDDEV_CAP_RX; + evt_payload->audcal_info.sample_rate = + dev_info->set_sample_rate ? 
+ dev_info->set_sample_rate : + dev_info->sample_rate; + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); + +sent_aud_cal: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } +skip_check: +voc_events: + if (callback->clnt_type == AUDDEV_CLNT_VOC) { + pr_debug("AUDDEV_CLNT_VOC\n"); + if (evt_id == AUDDEV_EVT_DEV_RLS) { + if (!pending_sent) + goto sent_voc; + else + pending_sent = 0; + } + if (evt_id == AUDDEV_EVT_REL_PENDING) + pending_sent = 1; + + if (evt_id == AUDDEV_EVT_DEVICE_VOL_MUTE_CHG) { + if (dev_info->capability & SNDDEV_CAP_TX) { + evt_payload->voc_vm_info.dev_type = + SNDDEV_CAP_TX; + evt_payload->voc_vm_info.acdb_dev_id = + dev_info->acdb_id; + evt_payload-> + voc_vm_info.dev_vm_val.mute = + routing_info.tx_mute; + } else { + evt_payload->voc_vm_info.dev_type = + SNDDEV_CAP_RX; + evt_payload->voc_vm_info.acdb_dev_id = + dev_info->acdb_id; + evt_payload-> + voc_vm_info.dev_vm_val.vol = + routing_info.voice_rx_vol; + } + } else if ((evt_id == AUDDEV_EVT_START_VOICE) + || (evt_id == AUDDEV_EVT_END_VOICE)) + memset(evt_payload, 0, + sizeof(union auddev_evt_data)); + else if (evt_id == AUDDEV_EVT_FREQ_CHG) { + if (routing_info.voice_tx_sample_rate + != dev_info->set_sample_rate) { + routing_info.voice_tx_sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.sample_rate + = dev_info->set_sample_rate; + evt_payload->freq_info.dev_type + = dev_info->capability; + evt_payload->freq_info.acdb_dev_id + = dev_info->acdb_id; + } else + goto sent_voc; + } else if (evt_id == AUDDEV_EVT_VOICE_STATE_CHG) + evt_payload->voice_state = + routing_info.voice_state; + else { + evt_payload->voc_devinfo.dev_type = + (dev_info->capability & SNDDEV_CAP_TX) ? + SNDDEV_CAP_TX : SNDDEV_CAP_RX; + evt_payload->voc_devinfo.acdb_dev_id = + dev_info->acdb_id; + evt_payload->voc_devinfo.dev_port_id = + dev_info->copp_id; + evt_payload->voc_devinfo.dev_sample = + dev_info->set_sample_rate ? 
+ dev_info->set_sample_rate : + dev_info->sample_rate; + evt_payload->voc_devinfo.dev_id = dev_id; + if (dev_info->capability & SNDDEV_CAP_RX) { + for (i = 0; i < VOC_RX_VOL_ARRAY_NUM; + i++) { + evt_payload-> + voc_devinfo.max_rx_vol[i] = + dev_info->max_voc_rx_vol[i]; + evt_payload + ->voc_devinfo.min_rx_vol[i] = + dev_info->min_voc_rx_vol[i]; + } + } + } + callback->auddev_evt_listener( + evt_id, + evt_payload, + callback->private_data); + if (evt_id == AUDDEV_EVT_DEV_RLS) + dev_info->sessions &= ~(0xFFFF); +sent_voc: + if (callback->cb_next == NULL) + break; + else { + callback = callback->cb_next; + continue; + } + } + } + kfree(evt_payload); + mutex_unlock(&session_lock); +} +EXPORT_SYMBOL(broadcast_event); + + +void mixer_post_event(u32 evt_id, u32 id) +{ + + pr_debug("evt_id = %d\n", evt_id); + switch (evt_id) { + case AUDDEV_EVT_DEV_CHG_VOICE: /* Called from Voice_route */ + broadcast_event(AUDDEV_EVT_DEV_CHG_VOICE, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEV_RDY: + broadcast_event(AUDDEV_EVT_DEV_RDY, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEV_RLS: + broadcast_event(AUDDEV_EVT_DEV_RLS, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_REL_PENDING: + broadcast_event(AUDDEV_EVT_REL_PENDING, id, SESSION_IGNORE); + break; + case AUDDEV_EVT_DEVICE_VOL_MUTE_CHG: + broadcast_event(AUDDEV_EVT_DEVICE_VOL_MUTE_CHG, id, + SESSION_IGNORE); + break; + case AUDDEV_EVT_STREAM_VOL_CHG: + broadcast_event(AUDDEV_EVT_STREAM_VOL_CHG, id, + SESSION_IGNORE); + break; + case AUDDEV_EVT_START_VOICE: + broadcast_event(AUDDEV_EVT_START_VOICE, + id, SESSION_IGNORE); + break; + case AUDDEV_EVT_END_VOICE: + broadcast_event(AUDDEV_EVT_END_VOICE, + id, SESSION_IGNORE); + break; + case AUDDEV_EVT_FREQ_CHG: + broadcast_event(AUDDEV_EVT_FREQ_CHG, id, SESSION_IGNORE); + break; + default: + break; + } +} +EXPORT_SYMBOL(mixer_post_event); + +static int __init audio_dev_ctrl_init(void) +{ + init_waitqueue_head(&audio_dev_ctrl.wait); + + event.cb = NULL; + + atomic_set(&audio_dev_ctrl.opened, 0); + audio_dev_ctrl.num_dev = 0; + audio_dev_ctrl.voice_tx_dev = NULL; + audio_dev_ctrl.voice_rx_dev = NULL; + routing_info.voice_state = VOICE_STATE_INVALID; + routing_info.call_state = 0; + mutex_init(&adm_tx_topology_tbl.lock); + mutex_init(&routing_info.copp_list_mutex); + mutex_init(&routing_info.adm_mutex); + + memset(routing_info.copp_list, DEVICE_IGNORE, + (sizeof(unsigned int) * MAX_SESSIONS * AFE_MAX_PORTS)); + return misc_register(&audio_dev_ctrl_misc); +} + +static void __exit audio_dev_ctrl_exit(void) +{ +} +module_init(audio_dev_ctrl_init); +module_exit(audio_dev_ctrl_exit); + +MODULE_DESCRIPTION("MSM 8K Audio Device Control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_lpa.c b/arch/arm/mach-msm/qdsp6v3/audio_lpa.c new file mode 100644 index 00000000000..7845a35b3fc --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_lpa.c @@ -0,0 +1,1435 @@ +/* low power audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_lpa.h" + +#include +#include +#include + +#include +#include + +#define MAX_BUF 4 +#define BUFSZ (655360) + +#define AUDDEC_DEC_PCM 0 + +#define AUDLPA_EVENT_NUM 10 /* Default number of pre-allocated event packets */ + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct audlpa_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audlpa_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audlpa_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; +}; + +struct audlpa_dec { + char *name; + int dec_attrb; + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*set_params)(void *); +}; + +static void audlpa_post_event(struct audio *audio, int type, + union msm_audio_event_payload payload); +static unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr, + unsigned long len, int ref_up); +static void audlpa_async_send_data(struct audio *audio, unsigned needed, + uint32_t token); +static int audlpa_pause(struct audio *audio); +static void audlpa_unmap_pmem_region(struct audio *audio); +static long pcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int audlpa_set_pcm_params(void *data); + +struct audlpa_dec audlpa_decs[] = { + {"msm_pcm_lp_dec", AUDDEC_DEC_PCM, &pcm_ioctl, + &audlpa_set_pcm_params}, +}; + +static void lpa_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct audio *audio = (struct audio *) private_data; + int rc = 0; + + switch (evt_id) { + case AUDDEV_EVT_STREAM_VOL_CHG: + audio->volume = evt_payload->session_vol; + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, " + "enabled = %d\n", __func__, audio->volume, + audio->out_enabled); + if (audio->out_enabled == 1) { + if (audio->ac) { + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) { + pr_aud_err("%s: Send Volume command failed" + " rc=%d\n", __func__, rc); + } + } + } + break; + default: + pr_aud_err("%s:ERROR:wrong event\n", __func__); + break; + } +} + +static void audlpa_prevent_sleep(struct audio *audio) +{ + pr_debug("%s:\n", __func__); + wake_lock(&audio->wakelock); +} + +static void audlpa_allow_sleep(struct audio *audio) +{ + pr_debug("%s:\n", __func__); + wake_unlock(&audio->wakelock); +} + +/* must be called with audio->lock held */ +static int audio_enable(struct audio *audio) +{ + pr_aud_info("%s\n", __func__); + + return 
q6asm_run(audio->ac, 0, 0, 0); + +} + +static int audlpa_async_flush(struct audio *audio) +{ + struct audlpa_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + int rc = 0; + + pr_aud_info("%s:out_enabled = %d, drv_status = 0x%x\n", __func__, + audio->out_enabled, audio->drv_status); + if (audio->out_enabled) { + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audlpa_buffer_node, + list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audlpa_post_event(audio, AUDIO_EVENT_WRITE_DONE, + payload); + kfree(buf_node); + } + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { + rc = audlpa_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + } + + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + audio->out_needed = 0; + + if (audio->stopped == 0) { + rc = audio_enable(audio); + if (rc < 0) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->out_enabled = 1; + audio->out_needed = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + wake_up(&audio->write_wait); + } + return rc; +} + +/* must be called with audio->lock held */ +static int audio_disable(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s:%d %d\n", __func__, audio->opened, audio->out_enabled); + + if (audio->opened) { + audio->out_enabled = 0; + audio->opened = 0; + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("%s: CLOSE cmd failed\n", __func__); + else + pr_debug("%s: rxed CLOSE resp\n", __func__); + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + wake_up(&audio->write_wait); + audio->out_needed = 0; + } + return rc; +} +static int audlpa_pause(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->out_enabled); + if (audio->out_enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +/* ------------------- dsp --------------------- */ +static void audlpa_async_send_data(struct audio *audio, unsigned needed, + uint32_t token) +{ + unsigned long flags; + struct audio_client *ac; + int rc = 0; + + pr_debug("%s:\n", __func__); + spin_lock_irqsave(&audio->dsp_lock, flags); + + pr_debug("%s: needed = %d, out_needed = %d, token = 0x%x\n", + __func__, needed, audio->out_needed, token); + if (needed && !audio->wflush) { + audio->out_needed = 1; + if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) { + /* pop one node out of queue */ + union msm_audio_event_payload evt_payload; + struct audlpa_buffer_node *used_buf; + + used_buf = list_first_entry(&audio->out_queue, + struct audlpa_buffer_node, list); + if (token == used_buf->paddr) { + pr_debug("%s, Release: addr: %lx," + " token = 0x%x\n", __func__, + used_buf->paddr, token); + list_del(&used_buf->list); + evt_payload.aio_buf = used_buf->buf; + audlpa_post_event(audio, AUDIO_EVENT_WRITE_DONE, + evt_payload); + kfree(used_buf); + audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN; + } + } + } + pr_debug("%s: out_needed = %d, stopped = %d, drv_status = 0x%x\n", + __func__, audio->out_needed, audio->stopped, + audio->drv_status); + if (audio->out_needed && (audio->stopped == 0)) { + 
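+		/* DSP is ready for more data: hand the next queued buffer to
+		 * q6asm_async_write(), tagging it with its physical address
+		 * (param.uid) so the WRITE_DONE callback can match and
+		 * release it. */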
struct audlpa_buffer_node *next_buf; + struct audio_aio_write_param param; + if (!list_empty(&audio->out_queue)) { + pr_debug("%s: list not empty\n", __func__); + next_buf = list_first_entry(&audio->out_queue, + struct audlpa_buffer_node, list); + if (next_buf) { + pr_debug("%s: Send: addr: %lx\n", __func__, + next_buf->paddr); + ac = audio->ac; + param.paddr = next_buf->paddr; + param.len = next_buf->buf.data_len; + param.msw_ts = 0; + param.lsw_ts = 0; + /* No time stamp valid */ + param.flags = NO_TIMESTAMP; + param.uid = next_buf->paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:q6asm_async_write failed\n", + __func__); + audio->out_needed = 0; + audio->drv_status |= ADRV_STATUS_OBUF_GIVEN; + } + } else if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS\n", __func__); + wake_up(&audio->write_wait); + } + } + + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +static int audlpa_events_pending(struct audio *audio) +{ + int empty; + + spin_lock(&audio->event_queue_lock); + empty = !list_empty(&audio->event_queue); + spin_unlock(&audio->event_queue_lock); + return empty || audio->event_abort; +} + +static void audlpa_reset_event_queue(struct audio *audio) +{ + struct audlpa_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock(&audio->event_queue_lock); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock(&audio->event_queue_lock); + + return; +} + +static long audlpa_process_event_req(struct audio *audio, void __user *arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audlpa_event *drv_evt = NULL; + int timeout; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int) usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout( + audio->event_wait, audlpa_events_pending(audio), + msecs_to_jiffies(timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible( + audio->event_wait, audlpa_events_pending(audio)); + } + + if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock(&audio->event_queue_lock); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audlpa_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + rc = -1; + spin_unlock(&audio->event_queue_lock); + return rc; + } + spin_unlock(&audio->event_queue_lock); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE || + drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("%s: AUDIO_EVENT_WRITE_DONE completing\n", __func__); + mutex_lock(&audio->lock); + audlpa_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0); + mutex_unlock(&audio->lock); + } + if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audlpa_pmem_check(struct audio *audio, + void *vaddr, unsigned long len) +{ + struct 
audlpa_pmem_region *region_elt; + struct audlpa_pmem_region t = { .vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("%s: region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + __func__, vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, + region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audlpa_pmem_add(struct audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audlpa_pmem_region *region; + int rc = -EINVAL; + + pr_aud_info("%s:\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audlpa_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("%s: add region paddr %lx vaddr %p, len %lu\n", __func__, + region->paddr, region->vaddr, + region->len); + list_add_tail(®ion->list, &audio->pmem_region_queue); + rc = q6asm_memory_map(audio->ac, (uint32_t)paddr, IN, (uint32_t)len, 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audlpa_pmem_remove(struct audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + + if ((region != NULL) && (region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("%s: region %p in use ref_cnt %d\n", + __func__, region, region->ref_cnt); + break; + } + rc = q6asm_memory_unmap(audio->ac, + (uint32_t)region->paddr, + IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +static int audlpa_pmem_lookup_vaddr(struct audio *audio, void *addr, + unsigned long len, struct audlpa_pmem_region **region) +{ + struct audlpa_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("%s: multiple hits for vaddr %p, len %ld\n", __func__, + addr, len); + list_for_each_entry(region_elt, + &audio->pmem_region_queue, list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("%s: \t%p, %ld --> %p\n", __func__, + region_elt->vaddr, region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 
0 : -1; +} + +unsigned long audlpa_pmem_fixup(struct audio *audio, void *addr, + unsigned long len, int ref_up) +{ + struct audlpa_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audlpa_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("%s: lookup (%p, %ld) failed\n", __func__, addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + paddr = region->paddr + (addr - region->vaddr); + return paddr; +} + +/* audio -> lock must be held at this point */ +static int audlpa_aio_buf_add(struct audio *audio, unsigned dir, + void __user *arg) +{ + struct audlpa_buffer_node *buf_node; + + buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + buf_node->paddr = audlpa_pmem_fixup( + audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.data_len & 0x1)) { + kfree(buf_node); + return -EINVAL; + } + list_add_tail(&buf_node->list, &audio->out_queue); + pr_debug("%s, Added to list: addr: %lx, length = %d\n", + __func__, buf_node->paddr, buf_node->buf.data_len); + audlpa_async_send_data(audio, 0, 0); + } else { + /* read */ + } + return 0; +} + +static int config(struct audio *audio) +{ + int rc = 0; + if (!audio->out_prefill) { + if (audio->codec_ops.set_params != NULL) { + rc = audio->codec_ops.set_params(audio); + audio->out_prefill = 1; + } + } + return rc; +} + +void q6_audlpa_out_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct audio *audio = (struct audio *) priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s: ASM_DATA_EVENT_WRITE_DONE, token = 0x%x\n", + __func__, token); + audlpa_async_send_data(audio, 1, token); + break; + case ASM_DATA_EVENT_EOS: + case ASM_DATA_CMDRSP_EOS: + pr_debug("%s: ASM_DATA_CMDRSP_EOS, teos = %d\n", __func__, + audio->teos); + if (audio->teos == 0) { + audio->teos = 1; + wake_up(&audio->write_wait); + } + break; + case ASM_SESSION_CMDRSP_GET_SESSION_TIME: + break; + default: + break; + } +} + +static long pcm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + pr_debug("%s: cmd = %d\n", __func__, cmd); + return -EINVAL; +} + +static int audlpa_set_pcm_params(void *data) +{ + struct audio *audio = (struct audio *)data; + int rc; + + rc = q6asm_media_format_block_pcm(audio->ac, audio->out_sample_rate, + audio->out_channel_mode); + if (rc < 0) + pr_aud_err("%s: Format block pcm failed\n", __func__); + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = -EINVAL; + uint64_t timestamp = 0; + uint64_t temp; + + pr_debug("%s: audio_ioctl() cmd = %d\n", __func__, cmd); + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + + pr_aud_info("%s: audio_get_stats command\n", __func__); + memset(&stats, 0, sizeof(stats)); + timestamp = q6asm_get_session_time(audio->ac); + if (timestamp < 0) { + pr_aud_err("%s: Get Session Time return value =%lld\n", + __func__, timestamp); + return -EAGAIN; + } + temp = (timestamp * 2 * audio->out_channel_mode); + temp = temp * (audio->out_sample_rate/1000); + temp = div_u64(temp, 1000); + audio->bytes_consumed = (uint32_t)(temp & 0xFFFFFFFF); + stats.byte_count = audio->bytes_consumed; + stats.unused[0] = (uint32_t)((temp >> 32) & 0xFFFFFFFF); + 
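+		/* byte_count carries the low 32 bits of the consumed-byte
+		 * estimate (session time x 2 bytes/sample x channels x
+		 * sample rate); unused[0] carries the high 32 bits. */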
pr_debug("%s: bytes_consumed:lsb = %d, msb = %d," + "timestamp = %lld\n", __func__, + audio->bytes_consumed, stats.unused[0], timestamp); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + switch (cmd) { + case AUDIO_ENABLE_AUDPP: + break; + + case AUDIO_SET_VOLUME: + break; + + case AUDIO_SET_PAN: + break; + + case AUDIO_SET_EQ: + break; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("%s: AUDIO_GET_EVENT\n", __func__); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audlpa_process_event_req(audio, + (void __user *) arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + pr_aud_info("%s: AUDIO_START: Session %d\n", __func__, + audio->ac->session); + if (!audio->opened) { + pr_aud_err("%s: Driver not opened\n", __func__); + rc = -EFAULT; + goto fail; + } + rc = config(audio); + if (rc) { + pr_aud_err("%s: Out Configuration failed\n", __func__); + rc = -EFAULT; + goto fail; + } + + rc = audio_enable(audio); + if (rc) { + pr_aud_err("%s: audio enable failed\n", __func__); + rc = -EFAULT; + goto fail; + } else { + struct asm_softpause_params param = { + .enable = SOFT_PAUSE_ENABLE, + .period = SOFT_PAUSE_PERIOD, + .step = SOFT_PAUSE_STEP, + .rampingcurve = SOFT_PAUSE_CURVE_LINEAR, + }; + audio->out_enabled = 1; + audio->out_needed = 1; + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + rc = q6asm_set_softpause(audio->ac, ¶m); + if (rc < 0) + pr_aud_err("%s: Send SoftPause Param failed rc=%d\n", + __func__, rc); + rc = q6asm_set_lrgain(audio->ac, 0x2000, 0x2000); + if (rc < 0) + pr_aud_err("%s: Send channel gain failed rc=%d\n", + __func__, rc); + /* disable mute by default */ + rc = q6asm_set_mute(audio->ac, 0); + if (rc < 0) + pr_aud_err("%s: Send mute command failed rc=%d\n", + __func__, rc); + if (!list_empty(&audio->out_queue)) + pr_aud_err("%s: write_list is not empty!!!\n", + __func__); + if (audio->stopped == 1) + audio->stopped = 0; + audlpa_prevent_sleep(audio); + } + break; + + case AUDIO_STOP: + pr_aud_info("%s: AUDIO_STOP: session_id:%d\n", __func__, + audio->ac->session); + audio->stopped = 1; + audlpa_async_flush(audio); + audio->out_enabled = 0; + audio->out_needed = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audlpa_allow_sleep(audio); + break; + + case AUDIO_FLUSH: + pr_aud_info("%s: AUDIO_FLUSH: session_id:%d\n", __func__, + audio->ac->session); + audio->wflush = 1; + if (audio->out_enabled) + rc = audlpa_async_flush(audio); + else + audio->wflush = 0; + audio->wflush = 0; + break; + + case AUDIO_SET_CONFIG:{ + struct msm_audio_config config; + pr_aud_info("%s: AUDIO_SET_CONFIG\n", __func__); + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + pr_aud_err("%s: ERROR: copy from user\n", __func__); + break; + } + if (!((config.channel_count == 1) || + (config.channel_count == 2))) { + rc = -EINVAL; + pr_aud_err("%s: ERROR: config.channel_count == %d\n", + __func__, config.channel_count); + break; + } + + if (!((config.bits == 8) || (config.bits == 16) || + (config.bits == 24))) { + rc = -EINVAL; + pr_aud_err("%s: ERROR: config.bits = %d\n", __func__, + config.bits); + break; + } + audio->out_sample_rate = config.sample_rate; + audio->out_channel_mode = config.channel_count; + audio->out_bits = 
config.bits; + audio->buffer_count = config.buffer_count; + audio->buffer_size = config.buffer_size; + rc = 0; + break; + } + + case AUDIO_GET_CONFIG:{ + struct msm_audio_config config; + config.buffer_count = audio->buffer_count; + config.buffer_size = audio->buffer_size; + config.sample_rate = audio->out_sample_rate; + config.channel_count = audio->out_channel_mode; + config.bits = audio->out_bits; + + config.meta_field = 0; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + else + rc = 0; + break; + } + + case AUDIO_PAUSE: + pr_aud_info("%s: AUDIO_PAUSE %ld\n", __func__, arg); + if (arg == 1) { + rc = audlpa_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audio_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->out_enabled = 1; + } + } + } + break; + + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_aud_info("%s: AUDIO_REGISTER_PMEM\n", __func__); + if (copy_from_user(&info, (void *) arg, sizeof(info))) + rc = -EFAULT; + else + rc = audlpa_pmem_add(audio, &info); + break; + } + + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_aud_info("%s: AUDIO_DEREGISTER_PMEM\n", __func__); + if (copy_from_user(&info, (void *) arg, sizeof(info))) + rc = -EFAULT; + else + rc = audlpa_pmem_remove(audio, &info); + break; + } + case AUDIO_ASYNC_WRITE: + pr_debug("%s: AUDIO_ASYNC_WRITE\n", __func__); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else + rc = audlpa_aio_buf_add(audio, 1, (void __user *) arg); + break; + + case AUDIO_GET_SESSION_ID: + if (copy_to_user((void *) arg, &audio->ac->session, + sizeof(unsigned short))) + return -EFAULT; + rc = 0; + break; + + default: + rc = audio->codec_ops.ioctl(file, cmd, arg); + } +fail: + mutex_unlock(&audio->lock); + return rc; +} + +/* Only useful in tunnel-mode */ +int audlpa_async_fsync(struct audio *audio) +{ + int rc = 0; + + pr_aud_info("%s:Session %d\n", __func__, audio->ac->session); + + /* Blocking client sends more data */ + mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + mutex_lock(&audio->write_lock); + audio->teos = 0; + + rc = wait_event_interruptible(audio->write_wait, + ((list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped)); + + if (audio->wflush || audio->stopped) + goto flush_event; + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) { + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + goto done; + } + rc = wait_event_interruptible_timeout(audio->write_wait, + (audio->teos || audio->wflush || + audio->stopped), 5*HZ); + + if (rc < 0) { + pr_aud_err("%s: wait event for teos failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->teos == 1) { + rc = audio_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->out_enabled = 1; + audio->out_needed = 1; + } + } + +flush_event: + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + 
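+	/* Clearing ADRV_STATUS_FSYNC re-opens AUDIO_ASYNC_WRITE, which
+	 * returns -EBUSY while an fsync is in flight.
+	 */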
mutex_unlock(&audio->lock); + + return rc; +} + +int audlpa_fsync(struct file *file, int datasync) +{ + struct audio *audio = file->private_data; + + return audlpa_async_fsync(audio); +} + +static void audlpa_reset_pmem_region(struct audio *audio) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +static void audlpa_unmap_pmem_region(struct audio *audio) +{ + struct audlpa_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_aud_info("%s:\n", __func__); + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audlpa_pmem_region, list); + if (region != NULL) { + pr_aud_info("%s: phy_address = 0x%lx\n", __func__, + region->paddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t)region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + } + } +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + pr_aud_info("%s: audio instance 0x%08x freeing, session %d\n", __func__, + (int)audio, audio->ac->session); + + mutex_lock(&audio->lock); + audio->wflush = 1; + if (audio->out_enabled) + audlpa_async_flush(audio); + audio->wflush = 0; + audlpa_unmap_pmem_region(audio); + audio_disable(audio); + msm_clear_session_id(audio->ac->session); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session); + q6asm_audio_client_free(audio->ac); + audlpa_reset_pmem_region(audio); +#ifdef CONFIG_HAS_EARLYSUSPEND + unregister_early_suspend(&audio->suspend_ctl.node); +#endif + audio->opened = 0; + audio->out_enabled = 0; + audio->out_prefill = 0; + audio->event_abort = 1; + wake_up(&audio->event_wait); + audlpa_reset_event_queue(audio); + iounmap(audio->data); + pmem_kfree(audio->phys); + if (audio->stopped == 0) + audlpa_allow_sleep(audio); + wake_lock_destroy(&audio->wakelock); + + mutex_unlock(&audio->lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + return 0; +} + +static void audlpa_post_event(struct audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audlpa_event *e_node = NULL; + + spin_lock(&audio->event_queue_lock); + + pr_debug("%s:\n", __func__); + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audlpa_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audlpa_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("%s: No mem to post event %d\n", __func__, type); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock(&audio->event_queue_lock); + wake_up(&audio->event_wait); +} + +#ifdef CONFIG_HAS_EARLYSUSPEND +static void audlpa_suspend(struct early_suspend *h) +{ + struct audlpa_suspend_ctl *ctl = + container_of(h, struct audlpa_suspend_ctl, node); + union msm_audio_event_payload payload; + + pr_aud_info("%s:\n", __func__); + audlpa_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload); +} + +static void audlpa_resume(struct early_suspend *h) +{ + struct audlpa_suspend_ctl *ctl = + container_of(h, struct audlpa_suspend_ctl, node); + union msm_audio_event_payload payload; + + pr_aud_info("%s:\n", __func__); + audlpa_post_event(ctl->audio, 
AUDIO_EVENT_RESUME, payload); +} +#endif + +#ifdef CONFIG_DEBUG_FS +static ssize_t audlpa_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audlpa_debug_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "out_enabled %d\n", audio->out_enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "volume %x\n", audio->volume); + n += scnprintf(buffer + n, debug_bufmax - n, + "sample rate %d\n", + audio->out_sample_rate); + n += scnprintf(buffer + n, debug_bufmax - n, + "channel mode %d\n", + audio->out_channel_mode); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "running %d\n", audio->running); + n += scnprintf(buffer + n, debug_bufmax - n, + "out_needed %d\n", audio->out_needed); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audlpa_debug_fops = { + .read = audlpa_debug_read, + .open = audlpa_debug_open, +}; +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = NULL; + int rc, i, dec_attrb = 0; + struct audlpa_event *e_node = NULL; +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_lpa_" + 5]; +#endif + char wake_lock_name[24]; + + /* Allocate audio instance, set to zero */ + audio = kzalloc(sizeof(struct audio), GFP_KERNEL); + if (!audio) { + pr_aud_err("%s: no memory to allocate audio instance\n", __func__); + rc = -ENOMEM; + goto done; + } + + if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { + pr_aud_info("%s: Tunnel Mode playback\n", __func__); + } else { + kfree(audio); + rc = -EACCES; + goto done; + } + + /* Allocate the decoder based on inode minor number*/ + audio->minor_no = iminor(inode); + + if (audio->minor_no >= ARRAY_SIZE(audlpa_decs)) { + pr_aud_err("%s: incorrect inode %d\n", __func__, audio->minor_no); + kfree(audio); + rc = -EINVAL; + goto done; + } + + dec_attrb |= audlpa_decs[audio->minor_no].dec_attrb; + audio->codec_ops.ioctl = audlpa_decs[audio->minor_no].ioctl; + audio->codec_ops.set_params = audlpa_decs[audio->minor_no].set_params; + audio->buffer_size = BUFSZ; + audio->buffer_count = MAX_BUF; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6_audlpa_out_cb, + (void *)audio); + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory for lpa client\n", + __func__); + rc = -ENOMEM; + goto err; + } + rc = q6asm_open_write(audio->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_info("%s: lpa out open failed\n", __func__); + goto err; + } + + pr_debug("%s: Set mode to AIO session[%d]\n", + __func__, + audio->ac->session); + rc = q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + if (rc < 0) + pr_aud_err("%s: Set IO mode failed\n", __func__); + + + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->write_lock); + 
mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->write_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + init_waitqueue_head(&audio->wait); + init_waitqueue_head(&audio->event_wait); + spin_lock_init(&audio->event_queue_lock); + snprintf(wake_lock_name, sizeof wake_lock_name, "audio_lpa_%x", + audio->ac->session); + wake_lock_init(&audio->wakelock, WAKE_LOCK_SUSPEND, wake_lock_name); + + audio->out_sample_rate = 44100; + audio->out_channel_mode = 2; + audio->out_bits = 16; + audio->volume = 0x2000; + + file->private_data = audio; + audio->opened = 1; + audio->out_enabled = 0; + audio->out_prefill = 0; + audio->bytes_consumed = 0; + + audio->device_events = AUDDEV_EVT_STREAM_VOL_CHG; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + + rc = auddev_register_evt_listner(audio->device_events, + AUDDEV_CLNT_DEC, + audio->ac->session, + lpa_listner, + (void *)audio); + if (rc) { + pr_aud_err("%s: failed to register listner\n", __func__); + goto err; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_lpa_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *) audio, &audlpa_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_aud_info("%s: debugfs_create_file failed\n", __func__); +#endif +#ifdef CONFIG_HAS_EARLYSUSPEND + audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; + audio->suspend_ctl.node.resume = audlpa_resume; + audio->suspend_ctl.node.suspend = audlpa_suspend; + audio->suspend_ctl.audio = audio; + register_early_suspend(&audio->suspend_ctl.node); +#endif + for (i = 0; i < AUDLPA_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audlpa_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("%s: event pkt alloc failed\n", __func__); + break; + } + } + pr_aud_info("%s: audio instance 0x%08x created session[%d]\n", __func__, + (int)audio, + audio->ac->session); +done: + return rc; +err: + q6asm_audio_client_free(audio->ac); + iounmap(audio->data); + pmem_kfree(audio->phys); + kfree(audio); + return rc; +} + +static const struct file_operations audio_lpa_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audlpa_fsync, +}; + +static dev_t audlpa_devno; +static struct class *audlpa_class; +struct audlpa_device { + const char *name; + struct device *device; + struct cdev cdev; +}; + +static struct audlpa_device *audlpa_devices; + +static void audlpa_create(struct audlpa_device *adev, const char *name, + struct device *parent, dev_t devt) +{ + struct device *dev; + int rc; + + dev = device_create(audlpa_class, parent, devt, "%s", name); + if (IS_ERR(dev)) + return; + + cdev_init(&adev->cdev, &audio_lpa_fops); + adev->cdev.owner = THIS_MODULE; + + rc = cdev_add(&adev->cdev, devt, 1); + if (rc < 0) { + device_destroy(audlpa_class, devt); + } else { + adev->device = dev; + adev->name = name; + } +} + +static int __init audio_init(void) +{ + int rc; + int n = ARRAY_SIZE(audlpa_decs); + + audlpa_devices = kzalloc(sizeof(struct audlpa_device) * n, GFP_KERNEL); + if (!audlpa_devices) + return -ENOMEM; + + audlpa_class = class_create(THIS_MODULE, "audlpa"); + if (IS_ERR(audlpa_class)) + goto fail_create_class; + + rc = alloc_chrdev_region(&audlpa_devno, 0, n, "msm_audio_lpa"); + if (rc < 0) + goto fail_alloc_region; + + for 
(n = 0; n < ARRAY_SIZE(audlpa_decs); n++) { + audlpa_create(audlpa_devices + n, + audlpa_decs[n].name, NULL, + MKDEV(MAJOR(audlpa_devno), n)); + } + + return 0; + +fail_alloc_region: + class_unregister(audlpa_class); + return rc; +fail_create_class: + kfree(audlpa_devices); + return -ENOMEM; +} + +static void __exit audio_exit(void) +{ + class_unregister(audlpa_class); + kfree(audlpa_devices); +} + +module_init(audio_init); +module_exit(audio_exit); + +MODULE_DESCRIPTION("MSM LPA driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_lpa.h b/arch/arm/mach-msm/qdsp6v3/audio_lpa.h new file mode 100644 index 00000000000..8e7a22cd9c8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_lpa.h @@ -0,0 +1,129 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Code Aurora nor + * the names of its contributors may be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef AUDIO_LPA_H +#define AUDIO_LPA_H + +#include +#include + +#define ADRV_STATUS_OBUF_GIVEN 0x00000001 +#define ADRV_STATUS_IBUF_GIVEN 0x00000002 +#define ADRV_STATUS_FSYNC 0x00000004 +#define ADRV_STATUS_PAUSE 0x00000008 + +#define SOFT_PAUSE_PERIOD 30 /* ramp up/down for 30ms */ +#define SOFT_PAUSE_STEP 2000 /* Step value 2ms or 2000us */ +enum { + SOFT_PAUSE_CURVE_LINEAR = 0, + SOFT_PAUSE_CURVE_EXP, + SOFT_PAUSE_CURVE_LOG, +}; + +struct buffer { + void *data; + unsigned size; + unsigned used; /* Input usage actual DSP produced PCM size */ + unsigned addr; +}; + +#ifdef CONFIG_HAS_EARLYSUSPEND +struct audlpa_suspend_ctl { + struct early_suspend node; + struct audio *audio; +}; +#endif + +struct codec_operations { + long (*ioctl)(struct file *, unsigned int, unsigned long); + int (*set_params)(void *); +}; + +struct audio { + spinlock_t dsp_lock; + + uint8_t out_needed; /* number of buffers the dsp is waiting for */ + struct list_head out_queue; /* queue to retain output buffers */ + + struct mutex lock; + struct mutex write_lock; + wait_queue_head_t write_wait; + + struct audio_client *ac; + + /* configuration to use on next enable */ + uint32_t out_sample_rate; + uint32_t out_channel_mode; + uint32_t out_bits; /* bits per sample (used by PCM decoder) */ + + /* data allocated for various buffers */ + char *data; + int32_t phys; /* physical address of write buffer */ + + uint32_t drv_status; + int wflush; /* Write flush */ + int opened; + int out_enabled; + int out_prefill; + int running; + int stopped; /* set when stopped, cleared on flush */ + int buf_refresh; + int teos; /* valid only if tunnel mode & no data left for decoder */ + +#ifdef CONFIG_HAS_EARLYSUSPEND + struct audlpa_suspend_ctl suspend_ctl; +#endif + + struct wake_lock wakelock; +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + + wait_queue_head_t wait; + struct list_head free_event_queue; + struct list_head event_queue; + wait_queue_head_t event_wait; + spinlock_t event_queue_lock; + struct mutex get_event_lock; + int event_abort; + + uint32_t device_events; + + struct list_head pmem_region_queue; /* protected by lock */ + + int eq_enable; + int eq_needs_commit; + uint32_t volume; + + unsigned int minor_no; + struct codec_operations codec_ops; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t bytes_consumed; +}; + +#endif /* !AUDIO_LPA_H */ diff --git a/arch/arm/mach-msm/qdsp6v3/audio_mvs.c b/arch/arm/mach-msm/qdsp6v3/audio_mvs.c new file mode 100644 index 00000000000..529b5840b06 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_mvs.c @@ -0,0 +1,998 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Each buffer is 20 ms, queue holds 200 ms of data. */ +#define MVS_MAX_Q_LEN 10 + +/* Length of the DSP frame info header added to the voc packet. */ +#define DSP_FRAME_HDR_LEN 1 + +enum audio_mvs_state_type { + AUDIO_MVS_CLOSED, + AUDIO_MVS_STARTED, + AUDIO_MVS_STOPPED +}; + +struct audio_mvs_buf_node { + struct list_head list; + struct msm_audio_mvs_frame frame; +}; + +struct audio_mvs_info_type { + enum audio_mvs_state_type state; + + uint32_t mvs_mode; + uint32_t rate_type; + uint32_t dtx_mode; + + struct list_head in_queue; + struct list_head free_in_queue; + + struct list_head out_queue; + struct list_head free_out_queue; + + wait_queue_head_t out_wait; + + struct mutex lock; + struct mutex in_lock; + struct mutex out_lock; + + spinlock_t dsp_lock; + + struct wake_lock suspend_lock; + struct wake_lock idle_lock; + + void *memory_chunk; +}; + +static struct audio_mvs_info_type audio_mvs_info; + +static uint32_t audio_mvs_get_rate(uint32_t mvs_mode, uint32_t rate_type) +{ + uint32_t cvs_rate; + + if (mvs_mode == MVS_MODE_AMR_WB) + cvs_rate = rate_type - MVS_AMR_MODE_0660; + else + cvs_rate = rate_type; + + pr_debug("%s: CVS rate is %d for MVS mode %d\n", + __func__, cvs_rate, mvs_mode); + + return cvs_rate; +} + +static void audio_mvs_process_ul_pkt(uint8_t *voc_pkt, + uint32_t pkt_len, + void *private_data) +{ + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = private_data; + unsigned long dsp_flags; + + /* Copy up-link packet into out_queue. */ + spin_lock_irqsave(&audio->dsp_lock, dsp_flags); + + if (!list_empty(&audio->free_out_queue)) { + buf_node = list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + switch (audio->mvs_mode) { + case MVS_MODE_AMR: + case MVS_MODE_AMR_WB: { + /* Remove the DSP frame info header. Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + buf_node->frame.frame_type = ((*voc_pkt) & 0xF0) >> 4; + buf_node->frame.frame_rate = ((*voc_pkt) & 0x0F); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + break; + } + + case MVS_MODE_IS127: { + buf_node->frame.frame_type = 0; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + break; + } + + case MVS_MODE_G729A: { + /* G729 frames are 10ms each, but the DSP works with + * 20ms frames and sends two 10ms frames per buffer. + * Extract the two frames and put them in separate + * buffers. + */ + /* Remove the first DSP frame info header. + * Header format: + * Bits 0-1: Frame type + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length of the + * first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->out_queue); + + /* Get another buffer from the free Q and fill in the + * second frame. 
+ */ + if (!list_empty(&audio->free_out_queue)) { + buf_node = + list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Remove the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length + * of the first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->out_queue); + + } else { + /* Drop the second frame. */ + pr_aud_err("%s: UL data dropped, read is slow\n", + __func__); + } + + break; + } + + case MVS_MODE_G711A: { + /* G711 frames are 10ms each, but the DSP works with + * 20ms frames and sends two 10ms frames per buffer. + * Extract the two frames and put them in separate + * buffers. + */ + /* Remove the first DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length of the + * first frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->out_queue); + + /* Get another buffer from the free Q and fill in the + * second frame. + */ + if (!list_empty(&audio->free_out_queue)) { + buf_node = + list_first_entry(&audio->free_out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Remove the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + buf_node->frame.frame_type = (*voc_pkt) & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + /* There are two frames in the buffer. Length + * of the second frame: + */ + buf_node->frame.len = (pkt_len - + 2 * DSP_FRAME_HDR_LEN) / 2; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->out_queue); + } else { + /* Drop the second frame. */ + pr_aud_err("%s: UL data dropped, read is slow\n", + __func__); + } + break; + } + + default: { + buf_node->frame.frame_type = 0; + + buf_node->frame.len = pkt_len; + + memcpy(&buf_node->frame.voc_pkt[0], + voc_pkt, + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->out_queue); + } + } + } else { + pr_aud_err("%s: UL data dropped, read is slow\n", __func__); + } + + spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags); + + wake_up(&audio->out_wait); +} + +static void audio_mvs_process_dl_pkt(uint8_t *voc_pkt, + uint32_t *pkt_len, + void *private_data) +{ + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = private_data; + unsigned long dsp_flags; + + spin_lock_irqsave(&audio->dsp_lock, dsp_flags); + + if (!list_empty(&audio->in_queue)) { + uint32_t rate_type = audio_mvs_get_rate(audio->mvs_mode, + audio->rate_type); + + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + switch (audio->mvs_mode) { + case MVS_MODE_AMR: + case MVS_MODE_AMR_WB: { + /* Add the DSP frame info header. 
Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + *voc_pkt = ((buf_node->frame.frame_type & 0x0F) << 4) | + (rate_type & 0x0F); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + break; + } + + case MVS_MODE_IS127: { + /* Add the DSP frame info header. Header format: + * Bits 0-3: Frame rate + * Bits 4-7: Frame type + */ + *voc_pkt = rate_type & 0x0F; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + break; + } + + case MVS_MODE_G729A: { + /* G729 frames are 10ms each but the DSP expects 20ms + * worth of data, so send two 10ms frames per buffer. + */ + /* Add the first DSP frame info header. Header format: + * Bits 0-1: Frame type + */ + *voc_pkt = buf_node->frame.frame_type & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->free_in_queue); + + if (!list_empty(&audio->in_queue)) { + /* Get the second buffer. */ + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Add the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + */ + *voc_pkt = buf_node->frame.frame_type & 0x03; + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = *pkt_len + + buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->free_in_queue); + } else { + /* Only 10ms worth of data is available, signal + * erasure frame. + */ + *voc_pkt = MVS_G729A_ERASURE & 0x03; + + *pkt_len = *pkt_len + DSP_FRAME_HDR_LEN; + } + + break; + } + + case MVS_MODE_G711A: { + /* G711 frames are 10ms each but the DSP expects 20ms + * worth of data, so send two 10ms frames per buffer. + */ + /* Add the first DSP frame info header. Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (buf_node->frame.frame_type & 0x03); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + voc_pkt = voc_pkt + buf_node->frame.len; + + list_add_tail(&buf_node->list, &audio->free_in_queue); + + if (!list_empty(&audio->in_queue)) { + /* Get the second buffer. */ + buf_node = list_first_entry(&audio->in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + /* Add the second DSP frame info header. + * Header format: + * Bits 0-1: Frame type + * Bits 2-3: Frame rate + */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (buf_node->frame.frame_type & 0x03); + voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN; + + *pkt_len = *pkt_len + + buf_node->frame.len + DSP_FRAME_HDR_LEN; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, + &audio->free_in_queue); + } else { + /* Only 10ms worth of data is available, signal + * erasure frame. 
+ */ + *voc_pkt = ((rate_type & 0x0F) << 2) | + (MVS_G711A_ERASURE & 0x03); + + *pkt_len = *pkt_len + DSP_FRAME_HDR_LEN; + } + break; + } + + default: { + *pkt_len = buf_node->frame.len; + + memcpy(voc_pkt, + &buf_node->frame.voc_pkt[0], + buf_node->frame.len); + + list_add_tail(&buf_node->list, &audio->free_in_queue); + } + } + } else { + *pkt_len = 0; + + pr_aud_info("%s: No DL data available to send to MVS\n", __func__); + } + + spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags); +} + +static uint32_t audio_mvs_get_media_type(uint32_t mvs_mode, uint32_t rate_type) +{ + uint32_t media_type; + + switch (mvs_mode) { + case MVS_MODE_IS127: + media_type = VSS_MEDIA_ID_EVRC_MODEM; + break; + + case MVS_MODE_AMR: + media_type = VSS_MEDIA_ID_AMR_NB_MODEM; + break; + + case MVS_MODE_LINEAR_PCM: + media_type = VSS_MEDIA_ID_PCM_NB; + break; + + case MVS_MODE_PCM: + media_type = VSS_MEDIA_ID_PCM_NB; + break; + + case MVS_MODE_AMR_WB: + media_type = VSS_MEDIA_ID_AMR_WB_MODEM; + break; + + case MVS_MODE_G729A: + media_type = VSS_MEDIA_ID_G729; + break; + + case MVS_MODE_G711A: + if (rate_type == MVS_G711A_MODE_MULAW) + media_type = VSS_MEDIA_ID_G711_MULAW; + else + media_type = VSS_MEDIA_ID_G711_ALAW; + break; + + default: + media_type = VSS_MEDIA_ID_PCM_NB; + } + + pr_debug("%s: media_type is 0x%x\n", __func__, media_type); + + return media_type; +} + +static uint32_t audio_mvs_get_network_type(uint32_t mvs_mode) +{ + uint32_t network_type; + + switch (mvs_mode) { + case MVS_MODE_IS127: + case MVS_MODE_AMR: + case MVS_MODE_LINEAR_PCM: + case MVS_MODE_PCM: + case MVS_MODE_G729A: + case MVS_MODE_G711A: + network_type = VSS_NETWORK_ID_VOIP_NB; + break; + + case MVS_MODE_AMR_WB: + network_type = VSS_NETWORK_ID_VOIP_WB; + break; + + default: + network_type = VSS_NETWORK_ID_DEFAULT; + } + + pr_debug("%s: network_type is 0x%x\n", __func__, network_type); + + return network_type; +} + +static int audio_mvs_start(struct audio_mvs_info_type *audio) +{ + int rc = 0; + + pr_aud_info("%s\n", __func__); + + /* Prevent sleep. */ + wake_lock(&audio->suspend_lock); + wake_lock(&audio->idle_lock); + + rc = voice_set_voc_path_full(1); + + if (rc == 0) { + voice_register_mvs_cb(audio_mvs_process_ul_pkt, + audio_mvs_process_dl_pkt, + audio); + + voice_config_vocoder( + audio_mvs_get_media_type(audio->mvs_mode, audio->rate_type), + audio_mvs_get_rate(audio->mvs_mode, audio->rate_type), + audio_mvs_get_network_type(audio->mvs_mode), + audio->dtx_mode); + + audio->state = AUDIO_MVS_STARTED; + } else { + pr_aud_err("%s: Error %d setting voc path to full\n", __func__, rc); + } + + return rc; +} + +static int audio_mvs_stop(struct audio_mvs_info_type *audio) +{ + int rc = 0; + + pr_aud_info("%s\n", __func__); + + voice_set_voc_path_full(0); + + audio->state = AUDIO_MVS_STOPPED; + + /* Allow sleep. */ + wake_unlock(&audio->suspend_lock); + wake_unlock(&audio->idle_lock); + + return rc; +} + +static int audio_mvs_open(struct inode *inode, struct file *file) +{ + int rc = 0; + int i; + int offset = 0; + struct audio_mvs_buf_node *buf_node = NULL; + + pr_aud_info("%s\n", __func__); + + mutex_lock(&audio_mvs_info.lock); + + /* Allocate input and output buffers. 
*/ + audio_mvs_info.memory_chunk = kmalloc(2 * MVS_MAX_Q_LEN * + sizeof(struct audio_mvs_buf_node), + GFP_KERNEL); + + if (audio_mvs_info.memory_chunk != NULL) { + for (i = 0; i < MVS_MAX_Q_LEN; i++) { + buf_node = audio_mvs_info.memory_chunk + offset; + + list_add_tail(&buf_node->list, + &audio_mvs_info.free_in_queue); + + offset = offset + sizeof(struct audio_mvs_buf_node); + } + + for (i = 0; i < MVS_MAX_Q_LEN; i++) { + buf_node = audio_mvs_info.memory_chunk + offset; + + list_add_tail(&buf_node->list, + &audio_mvs_info.free_out_queue); + + offset = offset + sizeof(struct audio_mvs_buf_node); + } + + audio_mvs_info.state = AUDIO_MVS_STOPPED; + + file->private_data = &audio_mvs_info; + + } else { + pr_aud_err("%s: No memory for IO buffers\n", __func__); + + rc = -ENOMEM; + } + + mutex_unlock(&audio_mvs_info.lock); + + return rc; +} + +static int audio_mvs_release(struct inode *inode, struct file *file) +{ + struct list_head *ptr = NULL; + struct list_head *next = NULL; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_aud_info("%s\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STARTED) + audio_mvs_stop(audio); + + /* Free input and output memory. */ + mutex_lock(&audio->in_lock); + + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + list_for_each_safe(ptr, next, &audio->free_in_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + mutex_unlock(&audio->in_lock); + + + mutex_lock(&audio->out_lock); + + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + list_for_each_safe(ptr, next, &audio->free_out_queue) { + buf_node = list_entry(ptr, struct audio_mvs_buf_node, list); + list_del(&buf_node->list); + } + + mutex_unlock(&audio->out_lock); + + kfree(audio->memory_chunk); + audio->memory_chunk = NULL; + + audio->state = AUDIO_MVS_CLOSED; + + mutex_unlock(&audio->lock); + + return 0; +} + +static ssize_t audio_mvs_read(struct file *file, + char __user *buf, + size_t count, + loff_t *pos) +{ + int rc = 0; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_debug("%s:\n", __func__); + + rc = wait_event_interruptible_timeout(audio->out_wait, + (!list_empty(&audio->out_queue) || + audio->state == AUDIO_MVS_STOPPED), + 1 * HZ); + + if (rc > 0) { + mutex_lock(&audio->out_lock); + + if ((audio->state == AUDIO_MVS_STARTED) && + (!list_empty(&audio->out_queue))) { + + if (count >= sizeof(struct msm_audio_mvs_frame)) { + buf_node = list_first_entry(&audio->out_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + rc = copy_to_user(buf, + &buf_node->frame, + sizeof(struct msm_audio_mvs_frame)); + + if (rc == 0) { + rc = buf_node->frame.len + + sizeof(buf_node->frame.frame_type) + + sizeof(buf_node->frame.len); + } else { + pr_aud_err("%s: Copy to user retuned %d", + __func__, rc); + + rc = -EFAULT; + } + + list_add_tail(&buf_node->list, + &audio->free_out_queue); + } else { + pr_aud_err("%s: Read count %d < sizeof(frame) %d", + __func__, count, + sizeof(struct msm_audio_mvs_frame)); + + rc = -ENOMEM; + } + } else { + pr_aud_err("%s: Read performed in state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->out_lock); + + } else if (rc == 0) { + 
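+		/* wait_event_interruptible_timeout() returned 0: the 1 s
+		 * wait expired without an up-link frame being queued.
+		 */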
pr_aud_err("%s: No UL data available\n", __func__); + + rc = -ETIMEDOUT; + } else { + pr_aud_err("%s: Read was interrupted\n", __func__); + + rc = -ERESTARTSYS; + } + + return rc; +} + +static ssize_t audio_mvs_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *pos) +{ + int rc = 0; + struct audio_mvs_buf_node *buf_node = NULL; + struct audio_mvs_info_type *audio = file->private_data; + + pr_debug("%s:\n", __func__); + + mutex_lock(&audio->in_lock); + + if (audio->state == AUDIO_MVS_STARTED) { + if (count <= sizeof(struct msm_audio_mvs_frame)) { + if (!list_empty(&audio->free_in_queue)) { + buf_node = + list_first_entry(&audio->free_in_queue, + struct audio_mvs_buf_node, + list); + list_del(&buf_node->list); + + rc = copy_from_user(&buf_node->frame, + buf, + count); + + list_add_tail(&buf_node->list, + &audio->in_queue); + } else { + pr_aud_err("%s: No free DL buffs\n", __func__); + } + } else { + pr_aud_err("%s: Write count %d < sizeof(frame) %d", + __func__, count, + sizeof(struct msm_audio_mvs_frame)); + + rc = -ENOMEM; + } + } else { + pr_aud_err("%s: Write performed in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->in_lock); + + return rc; +} + +static long audio_mvs_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct audio_mvs_info_type *audio = file->private_data; + + pr_aud_info("%s:\n", __func__); + + switch (cmd) { + case AUDIO_GET_MVS_CONFIG: { + struct msm_audio_mvs_config config; + + pr_aud_info("%s: IOCTL GET_MVS_CONFIG\n", __func__); + + mutex_lock(&audio->lock); + + config.mvs_mode = audio->mvs_mode; + config.rate_type = audio->rate_type; + config.dtx_mode = audio->dtx_mode; + + mutex_unlock(&audio->lock); + + rc = copy_to_user((void *)arg, &config, sizeof(config)); + if (rc == 0) + rc = sizeof(config); + else + pr_aud_err("%s: Config copy failed %d\n", __func__, rc); + + break; + } + + case AUDIO_SET_MVS_CONFIG: { + struct msm_audio_mvs_config config; + + pr_aud_info("%s: IOCTL SET_MVS_CONFIG\n", __func__); + + rc = copy_from_user(&config, (void *)arg, sizeof(config)); + if (rc == 0) { + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STOPPED) { + audio->mvs_mode = config.mvs_mode; + audio->rate_type = config.rate_type; + audio->dtx_mode = config.dtx_mode; + } else { + pr_aud_err("%s: Set confg called in state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + } else { + pr_aud_err("%s: Config copy failed %d\n", __func__, rc); + } + + break; + } + + case AUDIO_START: { + pr_aud_info("%s: IOCTL START\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STOPPED) { + rc = audio_mvs_start(audio); + + if (rc != 0) + audio_mvs_stop(audio); + } else { + pr_aud_err("%s: Start called in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + + break; + } + + case AUDIO_STOP: { + pr_aud_info("%s: IOCTL STOP\n", __func__); + + mutex_lock(&audio->lock); + + if (audio->state == AUDIO_MVS_STARTED) { + rc = audio_mvs_stop(audio); + } else { + pr_aud_err("%s: Stop called in invalid state %d\n", + __func__, audio->state); + + rc = -EPERM; + } + + mutex_unlock(&audio->lock); + + break; + } + + default: { + pr_aud_err("%s: Unknown IOCTL %d\n", __func__, cmd); + } + } + + return rc; +} + +static const struct file_operations audio_mvs_fops = { + .owner = THIS_MODULE, + .open = audio_mvs_open, + .release = audio_mvs_release, + .read = audio_mvs_read, + .write = 
audio_mvs_write, + .unlocked_ioctl = audio_mvs_ioctl +}; + +struct miscdevice audio_mvs_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_mvs", + .fops = &audio_mvs_fops +}; + +static int __init audio_mvs_init(void) +{ + int rc = 0; + + memset(&audio_mvs_info, 0, sizeof(audio_mvs_info)); + + init_waitqueue_head(&audio_mvs_info.out_wait); + + mutex_init(&audio_mvs_info.lock); + mutex_init(&audio_mvs_info.in_lock); + mutex_init(&audio_mvs_info.out_lock); + + spin_lock_init(&audio_mvs_info.dsp_lock); + + INIT_LIST_HEAD(&audio_mvs_info.in_queue); + INIT_LIST_HEAD(&audio_mvs_info.free_in_queue); + INIT_LIST_HEAD(&audio_mvs_info.out_queue); + INIT_LIST_HEAD(&audio_mvs_info.free_out_queue); + + wake_lock_init(&audio_mvs_info.suspend_lock, + WAKE_LOCK_SUSPEND, + "audio_mvs_suspend"); + wake_lock_init(&audio_mvs_info.idle_lock, + WAKE_LOCK_IDLE, + "audio_mvs_idle"); + + rc = misc_register(&audio_mvs_misc); + + return rc; +} + +static void __exit audio_mvs_exit(void){ + pr_aud_info("%s:\n", __func__); + + misc_deregister(&audio_mvs_misc); +} + +module_init(audio_mvs_init); +module_exit(audio_mvs_exit); + +MODULE_DESCRIPTION("MSM MVS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_utils.c b/arch/arm/mach-msm/qdsp6v3/audio_utils.c new file mode 100644 index 00000000000..aa641af7ee8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_utils.c @@ -0,0 +1,644 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +static int audio_in_pause(struct q6audio_in *audio) +{ + int rc; + + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + + return rc; +} + +static int audio_in_flush(struct q6audio_in *audio) +{ + int rc; + + pr_debug("%s:session id %d: flush\n", __func__, audio->ac->session); + /* Implicitly issue a pause to the decoder before flushing */ + rc = audio_in_pause(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + return rc; + } + + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) { + pr_aud_err("%s:session id %d: flush cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + return rc; + } + audio->rflush = 1; + audio->wflush = 1; + memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); + /* get read_lock to ensure no more waiting read thread */ + mutex_lock(&audio->read_lock); + audio->rflush = 0; + mutex_unlock(&audio->read_lock); + wake_up(&audio->write_wait); + /* get write_lock to ensure no more waiting write thread */ + mutex_lock(&audio->write_lock); + audio->wflush = 0; + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: in_bytes %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_bytes)); + pr_debug("%s:session id %d: in_samples %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +void audio_in_get_dsp_frames(struct q6audio_in *audio, + uint32_t token, uint32_t *payload) +{ + uint32_t index; + + index = token; + pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n", + __func__, audio->ac->session, token, payload[7], + payload[3]); + pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__, + audio->ac->session, payload[4], payload[5]); + pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__, + audio->ac->session, payload[6], payload[8]); + pr_debug("%s:session id %d: enc frame size=0x%8x\n", __func__, + audio->ac->session, payload[2]); + + audio->out_frame_info[index][0] = payload[7]; + audio->out_frame_info[index][1] = payload[3]; + + /* statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) { + atomic_inc(&audio->out_count); + wake_up(&audio->read_wait); + } +} + +/* must be called with audio->lock held */ +int audio_in_enable(struct q6audio_in *audio) +{ + if (audio->enabled) + return 0; + + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +/* must be called with audio->lock held */ +int audio_in_disable(struct q6audio_in *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n", + __func__, audio->ac->session, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("%s:session id %d: Failed to close the\ + session rc=%d\n", __func__, audio->ac->session, + rc); + audio->stopped = 1; + memset(audio->out_frame_info, 0, + sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); 
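+		/* Wake blocked writers too, so both read() and write()
+		 * callers see stopped == 1 and return.
+		 */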
+ wake_up(&audio->write_wait); + } + pr_debug("%s:session id %d: enabled[%d]\n", __func__, + audio->ac->session, audio->enabled); + return rc; +} + +int audio_in_buf_alloc(struct q6audio_in *audio) +{ + int rc = 0; + + switch (audio->buf_alloc) { + case NO_BUF_ALLOC: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, + audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc\ + failed\n", __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_IN: + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_OUT: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc\ + failed\n", __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + break; + default: + pr_debug("%s:session id %d: buf[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + } + + return rc; +} +/* ------------------- device --------------------- */ +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_FLUSH: { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the read_lock. + * While audio->stopped read threads will always + * exit immediately. 
+ */ + rc = audio_in_flush(audio); + if (rc < 0) + pr_aud_err("%s:session id %d: Flush Fail rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + case AUDIO_PAUSE: { + pr_debug("%s:session id %d: AUDIO_PAUSE\n", __func__, + audio->ac->session); + if (audio->enabled) + audio_in_pause(audio); + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, cfg.buffer_size, + cfg.buffer_count); + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + /* Minimum single frame size, + but with in maximum frames number */ + if ((cfg.buffer_size < (audio->min_frame_size+ \ + sizeof(struct meta_out_dsp))) || + (cfg.buffer_count < FRAME_NUM)) { + rc = -EINVAL; + break; + } + audio->str_cfg.buffer_size = cfg.buffer_size; + audio->str_cfg.buffer_count = cfg.buffer_count; + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s: session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, + audio->str_cfg.buffer_size, + audio->str_cfg.buffer_count); + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + /* Restrict the num of frames per buf to coincide with + * default buf size */ + if (cfg.frames_per_buf > audio->max_frames_per_buf) { + rc = -EFAULT; + break; + } + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + audio->buf_cfg.frames_per_buf = cfg.frames_per_buf; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, cfg.meta_info_enable, + cfg.frames_per_buf); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_CONFIG: { + if (copy_to_user((void *)arg, &audio->pcm_cfg, + sizeof(struct msm_audio_config))) + rc = -EFAULT; + break; + + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("%s:session id %d: Not sufficient permission to" + "change the record mode\n", __func__, + audio->ac->session); + rc = -EACCES; + break; + } + if ((cfg.buffer_count > PCM_BUF_COUNT) || + (cfg.buffer_count == 1)) + cfg.buffer_count = PCM_BUF_COUNT; + + audio->pcm_cfg.buffer_count = cfg.buffer_count; + audio->pcm_cfg.buffer_size = cfg.buffer_size; + 
audio->pcm_cfg.channel_count = cfg.channel_count; + audio->pcm_cfg.sample_rate = cfg.sample_rate; + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_aud_err("%s:session id %d: Buffer Alloc failed\n", + __func__, audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_CONFIG %d %d\n", __func__, + audio->ac->session, audio->pcm_cfg.buffer_count, + audio->pcm_cfg.buffer_size); + break; + } + default: + /* call codec specific ioctl */ + rc = audio->enc_ioctl(file, cmd, arg); + } + mutex_unlock(&audio->lock); + return rc; +} + +ssize_t audio_in_read(struct file *file, + char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + unsigned char *data; + uint32_t offset = 0; + uint32_t size = 0; + int rc = 0; + uint32_t idx; + struct meta_out_dsp meta; + uint32_t bytes_to_copy = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + (sizeof(unsigned char) + + (sizeof(struct meta_out_dsp)*(audio->buf_cfg.frames_per_buf))); + + pr_debug("%s:session id %d: read - %d\n", __func__, audio->ac->session, + count); + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->read_lock); + while (count > 0) { + rc = wait_event_interruptible( + audio->read_wait, + ((atomic_read(&audio->out_count) > 0) || + (audio->stopped) || + audio->rflush || audio->eos_rsp)); + + if (rc < 0) + break; + + if ((audio->stopped && !(atomic_read(&audio->out_count))) || + audio->rflush) { + pr_debug("%s:session id %d: driver in stop state or\ + flush,No more buf to read", __func__, + audio->ac->session); + rc = 0;/* End of File */ + break; + } + if (!(atomic_read(&audio->out_count)) && + (audio->eos_rsp == 1) && + (count >= (sizeof(unsigned char) + + sizeof(struct meta_out_dsp)))) { + unsigned char num_of_frames; + pr_aud_info("%s:session id %d: eos %d at output\n", + __func__, audio->ac->session, audio->eos_rsp); + if (buf != start) + break; + num_of_frames = 0xFF; + if (copy_to_user(buf, &num_of_frames, + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + buf += sizeof(unsigned char); + meta.frame_size = 0xFFFF; + meta.encoded_pcm_samples = 0xFFFF; + meta.msw_ts = 0x00; + meta.lsw_ts = 0x00; + meta.nflags = AUD_EOS_SET; + audio->eos_rsp = 0; + if (copy_to_user(buf, &meta, sizeof(meta))) { + rc = -EFAULT; + break; + } + buf += sizeof(meta); + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(OUT, audio->ac, + &size, &idx); + if ((count >= (size + mfield_size)) && data) { + if (audio->buf_cfg.meta_info_enable) { + if (copy_to_user(buf, + &audio->out_frame_info[idx][0], + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + bytes_to_copy = + (size + audio->out_frame_info[idx][1]); + /* Number of frames information copied */ + buf += sizeof(unsigned char); + count -= sizeof(unsigned char); + } else { + offset = audio->out_frame_info[idx][1]; + bytes_to_copy = size; + } + + pr_debug("%s:session id %d: offset=%d nr of frames= %d\n", + __func__, audio->ac->session, + audio->out_frame_info[idx][1], + audio->out_frame_info[idx][0]); + + if (copy_to_user(buf, &data[offset], bytes_to_copy)) { + rc = -EFAULT; + break; + } + count -= bytes_to_copy; + buf += bytes_to_copy; + } else { + pr_aud_err("%s:session id %d: short read data[%p]\ + bytesavail[%d]bytesrequest[%d]\n", __func__, + audio->ac->session, + data, size, count); + } + 
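+		/* Release the consumed buffer count and queue the next DSP
+		 * read via q6asm_read(); at most one DSP buffer is returned
+		 * per read() call.
+		 */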
atomic_dec(&audio->out_count); + q6asm_read(audio->ac); + break; + } + mutex_unlock(&audio->read_lock); + + pr_debug("%s:session id %d: read: %d bytes\n", __func__, + audio->ac->session, (buf-start)); + if (buf > start) + return buf - start; + return rc; +} + +static int extract_meta_info(char *buf, unsigned long *msw_ts, + unsigned long *lsw_ts, unsigned int *flags) +{ + struct meta_in *meta = (struct meta_in *)buf; + *msw_ts = meta->ntimestamp.highpart; + *lsw_ts = meta->ntimestamp.lowpart; + *flags = meta->nflags; + return 0; +} + +ssize_t audio_in_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + size_t xfer = 0; + char *cpy_ptr; + int rc = 0; + unsigned char *data; + uint32_t size = 0; + uint32_t idx = 0; + uint32_t nflags = 0; + unsigned long msw_ts = 0; + unsigned long lsw_ts = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + sizeof(struct meta_in); + + pr_debug("%s:session id %d: to write[%d]\n", __func__, + audio->ac->session, count); + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->write_lock); + + while (count > 0) { + rc = wait_event_interruptible(audio->write_wait, + ((atomic_read(&audio->in_count) > 0) || + (audio->stopped) || + (audio->wflush))); + if (rc < 0) + break; + if (audio->stopped || audio->wflush) { + pr_debug("%s: session id %d: stop or flush\n", __func__, + audio->ac->session); + rc = -EBUSY; + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(IN, audio->ac, + &size, &idx); + if (!data) { + pr_debug("%s:session id %d: No buf available\n", + __func__, audio->ac->session); + continue; + } + cpy_ptr = data; + if (audio->buf_cfg.meta_info_enable) { + if (buf == start) { + /* Processing beginning of user buffer */ + if (copy_from_user(cpy_ptr, buf, mfield_size)) { + rc = -EFAULT; + break; + } + /* Check if EOS flag is set and buffer has + * contains just meta field + */ + extract_meta_info(cpy_ptr, &msw_ts, &lsw_ts, + &nflags); + buf += mfield_size; + if (count == mfield_size) { + /* send the EOS and return */ + pr_debug("%s:session id %d: send EOS\ + 0x%8x\n", __func__, + audio->ac->session, nflags); + break; + } + count -= mfield_size; + } else { + pr_debug("%s:session id %d: continuous\ + buffer\n", __func__, audio->ac->session); + } + } + xfer = (count > (audio->pcm_cfg.buffer_size)) ? 
+ (audio->pcm_cfg.buffer_size) : count; + + if (copy_from_user(cpy_ptr, buf, xfer)) { + rc = -EFAULT; + break; + } + rc = q6asm_write(audio->ac, xfer, msw_ts, lsw_ts, 0x00); + if (rc < 0) { + rc = -EFAULT; + break; + } + atomic_dec(&audio->in_count); + count -= xfer; + buf += xfer; + } + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: eos_condition 0x%8x buf[0x%x]\ + start[0x%x]\n", __func__, audio->ac->session, + nflags, (int) buf, (int) start); + if (nflags & AUD_EOS_SET) { + rc = q6asm_cmd(audio->ac, CMD_EOS); + pr_aud_info("%s:session id %d: eos %d at input\n", __func__, + audio->ac->session, audio->eos_rsp); + } + pr_debug("%s:session id %d: Written %d Avail Buf[%d]", __func__, + audio->ac->session, (buf - start - mfield_size), + atomic_read(&audio->in_count)); + if (!rc) { + if (buf > start) + return buf - start; + } + return rc; +} + +int audio_in_release(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = file->private_data; + pr_aud_info("%s: session id %d\n", __func__, audio->ac->session); + mutex_lock(&audio->lock); + audio_in_disable(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + kfree(audio->enc_cfg); + kfree(audio); + return 0; +} + diff --git a/arch/arm/mach-msm/qdsp6v3/audio_utils.h b/arch/arm/mach-msm/qdsp6v3/audio_utils.h new file mode 100644 index 00000000000..da4a520da4a --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_utils.h @@ -0,0 +1,109 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ +#include + +#define FRAME_NUM (8) + +#define PCM_BUF_COUNT (2) + +#define AUD_EOS_SET 0x01 +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 + +#define NO_BUF_ALLOC 0x00 +#define BUF_ALLOC_IN 0x01 +#define BUF_ALLOC_OUT 0x02 +#define BUF_ALLOC_INOUT 0x03 +#define ALIGN_BUF_SIZE(size) ((size + 4095) & (~4095)) + +struct timestamp{ + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in{ + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct meta_out{ + unsigned char num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +struct q6audio_in{ + spinlock_t dsp_lock; + atomic_t in_bytes; + atomic_t in_samples; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + + struct audio_client *ac; + struct msm_audio_stream_config str_cfg; + void *enc_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + void *codec_cfg; + + /* number of buffers available to read/write */ + atomic_t in_count; + atomic_t out_count; + + /* first idx: num of frames per buf, second idx: offset to frame */ + uint32_t out_frame_info[FRAME_NUM][2]; + int eos_rsp; + int opened; + int enabled; + int stopped; + int feedback; /* Flag indicates whether used + in Non Tunnel mode */ + int rflush; + int wflush; + int buf_alloc; + uint16_t min_frame_size; + uint16_t max_frames_per_buf; + long (*enc_ioctl)(struct file *, unsigned int, unsigned long); +}; + +void audio_in_get_dsp_frames(struct q6audio_in *audio, + uint32_t token, uint32_t *payload); +int audio_in_enable(struct q6audio_in *audio); +int audio_in_disable(struct q6audio_in *audio); +int audio_in_buf_alloc(struct q6audio_in *audio); +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +ssize_t audio_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos); +ssize_t audio_in_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos); +int audio_in_release(struct inode *inode, struct file *file); + diff --git a/arch/arm/mach-msm/qdsp6v3/audio_wma.c b/arch/arm/mach-msm/qdsp6v3/audio_wma.c new file mode 100644 index 00000000000..f2deb4f5f46 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_wma.c @@ -0,0 +1,1585 @@ +/* wma audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */ +#define ADRV_STATUS_FSYNC 0x00000008 +#define ADRV_STATUS_PAUSE 0x00000010 + +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 +#define AUDWMA_EOS_SET 0x00000001 + +/* Default number of pre-allocated event packets */ +#define AUDWMA_EVENT_NUM 10 + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct timestamp { + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in { + unsigned char reserved[18]; + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct dec_meta_out{ + unsigned int reserved[7]; + unsigned int num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +/* General meta field to store meta info +locally */ +union meta_data { + struct dec_meta_out meta_out; + struct meta_in meta_in; +} __attribute__ ((packed)); + +struct audwma_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audwma_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audwma_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; + unsigned long token; + void *kvaddr; + union meta_data meta_info; +}; + +struct q6audio; + +struct audwma_drv_operations { + void (*out_flush) (struct q6audio *); + void (*in_flush) (struct q6audio *); + int (*fsync)(struct q6audio *); +}; + +#define PCM_BUF_COUNT (2) +/* Buffer with meta */ +#define PCM_BUFSZ_MIN ((4*1024) + sizeof(struct dec_meta_out)) + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (2) +#define FRAME_SIZE ((4*1024) + sizeof(struct meta_in)) + +struct q6audio { + atomic_t in_bytes; + atomic_t in_samples; + + struct msm_audio_stream_config str_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + struct msm_audio_wma_config_v2 wma_config; + + struct audio_client *ac; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + struct mutex get_event_lock; + wait_queue_head_t cmd_wait; + wait_queue_head_t write_wait; + wait_queue_head_t event_wait; + spinlock_t dsp_lock; + spinlock_t event_queue_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct list_head out_queue; /* queue to retain output buffers */ + struct list_head in_queue; /* queue to retain input buffers */ + 
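+ /* Event bookkeeping: free_event_queue holds pre-allocated event
+  * nodes, event_queue holds completed events waiting to be fetched
+  * with the AUDIO_GET_EVENT ioctl.
+  */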
struct list_head free_event_queue; + struct list_head event_queue; + struct list_head pmem_region_queue; /* protected by lock */ + struct audwma_drv_operations drv_ops; + union msm_audio_event_payload eos_write_payload; + + uint32_t drv_status; + int event_abort; + int eos_rsp; + int eos_flag; + int opened; + int enabled; + int stopped; + int feedback; + int rflush; /* Read flush */ + int wflush; /* Write flush */ +}; + +static int insert_eos_buf(struct q6audio *audio, + struct audwma_buffer_node *buf_node) { + struct dec_meta_out *eos_buf = buf_node->kvaddr; + eos_buf->num_of_frames = 0xFFFFFFFF; + eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; + eos_buf->meta_out_dsp[0].nflags = AUDWMA_EOS_SET; + return sizeof(struct dec_meta_out) + + sizeof(eos_buf->meta_out_dsp[0]); +} + +/* Routine which updates read buffers of driver/dsp, + for flush operation as DSP output might not have proper + value set */ +static int insert_meta_data(struct q6audio *audio, + struct audwma_buffer_node *buf_node) { + struct dec_meta_out *meta_data = buf_node->kvaddr; + meta_data->num_of_frames = 0x0; + meta_data->meta_out_dsp[0].offset_to_frame = 0x0; + meta_data->meta_out_dsp[0].nflags = 0x0; + return sizeof(struct dec_meta_out) + + sizeof(meta_data->meta_out_dsp[0]); +} + +static void extract_meta_info(struct q6audio *audio, + struct audwma_buffer_node *buf_node, int dir) +{ + if (dir) { /* Read */ + if (audio->buf_cfg.meta_info_enable) + memcpy(&buf_node->meta_info.meta_in, + (char *)buf_node->kvaddr, sizeof(struct meta_in)); + else + memset(&buf_node->meta_info.meta_in, + 0, sizeof(struct meta_in)); + pr_debug("i/p: msw_ts 0x%lx lsw_ts 0x%lx nflags 0x%8x\n", + buf_node->meta_info.meta_in.ntimestamp.highpart, + buf_node->meta_info.meta_in.ntimestamp.lowpart, + buf_node->meta_info.meta_in.nflags); + } else { /* Write */ + memcpy((char *)buf_node->kvaddr, + &buf_node->meta_info.meta_out, + sizeof(struct dec_meta_out)); + pr_debug("o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x\n", + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].msw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].lsw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].nflags); + } +} + +static int audwma_pmem_lookup_vaddr(struct q6audio *audio, void *addr, + unsigned long len, struct audwma_pmem_region **region) +{ + struct audwma_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("multiple hits for vaddr %p, len %ld\n", addr, len); + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("\t%p, %ld --> %p\n", region_elt->vaddr, + region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 
0 : -1; +} + +static unsigned long audwma_pmem_fixup(struct q6audio *audio, void *addr, + unsigned long len, int ref_up, void **kvaddr) +{ + struct audwma_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audwma_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("lookup (%p, %ld) failed\n", addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + pr_debug("found region %p ref_cnt %d\n", region, region->ref_cnt); + paddr = region->paddr + (addr - region->vaddr); + /* provide kernel virtual address for accessing meta information */ + if (kvaddr) + *kvaddr = (void *) (region->kvaddr + (addr - region->vaddr)); + return paddr; +} + +static void audwma_post_event(struct q6audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audwma_event *e_node = NULL; + unsigned long flags; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audwma_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audwma_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("No mem to post event %d\n", type); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + wake_up(&audio->event_wait); +} + +static int audwma_enable(struct q6audio *audio) +{ + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +static int audwma_disable(struct q6audio *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s: inbytes[%d] insamples[%d]\n", __func__, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + /* Close the session */ + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("Failed to close the session rc=%d\n", rc); + audio->stopped = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + pr_debug("enabled[%d]\n", audio->enabled); + return rc; +} + +static int audwma_pause(struct q6audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->enabled); + if (audio->enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +static int audwma_flush(struct q6audio *audio) +{ + int rc; + + if (audio->enabled) { + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { + rc = audwma_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + else + audio->drv_status |= ADRV_STATUS_PAUSE; + } + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + /* Not in stop state, reenable the stream */ + if (audio->stopped == 0) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s:audio re-enable failed\n", __func__); + else { + audio->enabled = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + } + pr_debug("in_bytes %d\n", atomic_read(&audio->in_bytes)); + pr_debug("in_samples %d\n", atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 
0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +static void audwma_async_read(struct q6audio *audio, + struct audwma_buffer_node *buf_node) +{ + struct audio_client *ac; + struct audio_aio_read_param param; + int rc; + + pr_debug("%s: Send read buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.buf_len); + ac = audio->ac; + /* Provide address so driver can append nr frames information */ + param.paddr = buf_node->paddr + + sizeof(struct dec_meta_out); + param.len = buf_node->buf.buf_len - + sizeof(struct dec_meta_out); + param.uid = param.paddr; + /* Write command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_read(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +static void audwma_async_write(struct q6audio *audio, + struct audwma_buffer_node *buf_node) +{ + int rc; + struct audio_client *ac; + struct audio_aio_write_param param; + + pr_debug("%s: Send write buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.data_len); + + ac = audio->ac; + /* Offset with appropriate meta */ + param.paddr = buf_node->paddr + sizeof(struct meta_in); + param.len = buf_node->buf.data_len - sizeof(struct meta_in); + param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart; + param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart; + /* If no meta_info enaled, indicate no time stamp valid */ + if (audio->buf_cfg.meta_info_enable) + param.flags = 0; + else + param.flags = 0xFF00; + param.uid = param.paddr; + /* Read command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +/* Write buffer to DSP / Handle Ack from DSP */ +static void audwma_async_write_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwma_buffer_node *used_buf; + + /* No active flush in progress */ + if (audio->wflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->out_queue)); + used_buf = list_first_entry(&audio->out_queue, + struct audwma_buffer_node, list); + if (token == used_buf->token) { + list_del(&used_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_debug("consumed buffer\n"); + event_payload.aio_buf = used_buf->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(used_buf); + if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS in\ + Tunnel\n", __func__); + wake_up(&audio->write_wait); + } + } else { + pr_aud_err("expected=%lx ret=%x\n", used_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +/* Read buffer from DSP / Handle Ack from DSP */ +static void audwma_async_read_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwma_buffer_node *filled_buf; + + /* No active flush in progress */ + if (audio->rflush) + return; + + /* Statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->in_queue)); + filled_buf = list_first_entry(&audio->in_queue, + struct audwma_buffer_node, list); + if (token == (filled_buf->token)) { + list_del(&filled_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, 
flags); + event_payload.aio_buf = filled_buf->buf; + /* Read done Buffer due to flush/normal condition + after EOS event, so append EOS buffer */ + if (audio->eos_rsp == 0x1) { + event_payload.aio_buf.data_len = + insert_eos_buf(audio, filled_buf); + /* Reset flag back to indicate eos intimated */ + audio->eos_rsp = 0; + } else { + filled_buf->meta_info.meta_out.num_of_frames = + payload[7]; + pr_debug("nr of frames 0x%8x\n", + filled_buf->meta_info.meta_out.num_of_frames); + event_payload.aio_buf.data_len = payload[2] + \ + payload[3] + \ + sizeof(struct dec_meta_out); + extract_meta_info(audio, filled_buf, 0); + audio->eos_rsp = 0; + } + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, event_payload); + kfree(filled_buf); + } else { + pr_aud_err("expected=%lx ret=%x\n", filled_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +static void q6_audwma_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio *audio = (struct q6audio *)priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + __func__, token); + audwma_async_write_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_READ_DONE: + pr_debug("%s:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + __func__, token); + audwma_async_read_ack(audio, token, payload); + break; + case ASM_DATA_CMDRSP_EOS: + /* EOS Handle */ + pr_debug("%s:ASM_DATA_CMDRSP_EOS\n", __func__); + if (audio->feedback) { /* Non-Tunnel mode */ + audio->eos_rsp = 1; + /* propagate input EOS i/p buffer, + after receiving DSP acknowledgement */ + if (audio->eos_flag && + (audio->eos_write_payload.aio_buf.buf_addr)) { + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + audio->eos_flag = 0; + } + } else { /* Tunnel mode */ + audio->eos_rsp = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + break; + default: + pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); + break; + } +} + +/* ------------------- device --------------------- */ +static void audwma_async_out_flush(struct q6audio *audio) +{ + struct audwma_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + unsigned long flags; + + pr_debug("%s\n", __func__); + /* EOS followed by flush, EOS response not guranteed, free EOS i/p + buffer */ + spin_lock_irqsave(&audio->dsp_lock, flags); + if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { + pr_debug("%s: EOS followed by flush received,acknowledge eos"\ + " i/p buffer immediately\n", __func__); + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audwma_buffer_node, list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate WRITE_DONE during flush\n", __func__); + } +} + +static void audwma_async_in_flush(struct q6audio *audio) +{ + struct audwma_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + + pr_debug("%s\n", __func__); + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audwma_buffer_node, 
list); + list_del(&buf_node->list); + /* Forcefull send o/p eos buffer after flush, if no eos response + * received by dsp even after sending eos command */ + if ((audio->eos_rsp != 1) && audio->eos_flag) { + pr_debug("%s: send eos on o/p buffer during flush\n",\ + __func__); + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + audio->eos_flag = 0; + } else { + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_meta_data(audio, buf_node); + } + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate READ_DONE during flush\n", __func__); + } +} + +static void audwma_ioport_reset(struct q6audio *audio) +{ + if (audio->drv_status & ADRV_STATUS_AIO_INTF) { + /* If fsync is in progress, make sure + * return value of fsync indicates + * abort due to flush + */ + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("fsync in progress\n"); + audio->drv_ops.out_flush(audio); + } else + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + } +} + +static int audwma_events_pending(struct q6audio *audio) +{ + unsigned long flags; + int empty; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + empty = !list_empty(&audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return empty || audio->event_abort; +} + +static void audwma_reset_event_queue(struct q6audio *audio) +{ + unsigned long flags; + struct audwma_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + return; +} + +static long audwma_process_event_req(struct q6audio *audio, void __user * arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audwma_event *drv_evt = NULL; + int timeout; + unsigned long flags; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int)usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout(audio->event_wait, + audwma_events_pending + (audio), + msecs_to_jiffies + (timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible(audio->event_wait, + audwma_events_pending(audio)); + } + + if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwma_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + pr_aud_err("Unexpected path\n"); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { + pr_debug("posted AUDIO_EVENT_WRITE_DONE to user\n"); + mutex_lock(&audio->write_lock); + audwma_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + 
drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->write_lock); + } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("posted AUDIO_EVENT_READ_DONE to user\n"); + mutex_lock(&audio->read_lock); + audwma_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->read_lock); + } + + /* Some read buffer might be held up in DSP,release all + Once EOS indicated*/ + if (audio->eos_rsp && !list_empty(&audio->in_queue)) { + pr_debug("Send flush command to release read buffers"\ + "held up in DSP\n"); + audwma_flush(audio); + } + + if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audwma_pmem_check(struct q6audio *audio, + void *vaddr, unsigned long len) +{ + struct audwma_pmem_region *region_elt; + struct audwma_pmem_region t = {.vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audwma_pmem_add(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audwma_pmem_region *region; + int rc = -EINVAL; + + pr_debug("%s:\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audwma_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("add region paddr %lx vaddr %p, len %lu kvaddr %lx\n", + region->paddr, region->vaddr, region->len, region->kvaddr); + list_add_tail(®ion->list, &audio->pmem_region_queue); + + rc = q6asm_memory_map(audio->ac, (uint32_t) paddr, IN, (uint32_t) len, + 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audwma_pmem_remove(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audwma_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("info fd %d vaddr %p\n", info->fd, info->vaddr); + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwma_pmem_region, list); + + if ((region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("region %p in use ref_cnt %d\n", + region, region->ref_cnt); + break; + } + pr_debug("remove region fd %d vaddr %p\n", + info->fd, info->vaddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t) region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +/* audio -> lock must be held at this point */ +static int audwma_aio_buf_add(struct q6audio *audio, unsigned dir, + void __user *arg) +{ + unsigned long flags; + struct audwma_buffer_node *buf_node; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + 
return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + pr_debug("node %p dir %x buf_addr %p buf_len %d data_len \ + %d\n", buf_node, dir, buf_node->buf.buf_addr, + buf_node->buf.buf_len, buf_node->buf.data_len); + + buf_node->paddr = audwma_pmem_fixup(audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1, + &buf_node->kvaddr); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (!audio->feedback && !buf_node->buf.data_len)) { + kfree(buf_node); + return -EINVAL; + } + extract_meta_info(audio, buf_node, 1); + /* Not a EOS buffer */ + if (!(buf_node->meta_info.meta_in.nflags & AUDWMA_EOS_SET)) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwma_async_write(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->out_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + if (buf_node->meta_info.meta_in.nflags & AUDWMA_EOS_SET) { + if (!audio->wflush) { + pr_debug("%s:Send EOS cmd at i/p\n", __func__); + /* Driver will forcefully post writedone event + once eos ack recived from DSP*/ + audio->eos_write_payload.aio_buf =\ + buf_node->buf; + audio->eos_flag = 1; + audio->eos_rsp = 0; + q6asm_cmd(audio->ac, CMD_EOS); + kfree(buf_node); + } else { /* Flush in progress, send back i/p EOS buffer + as is */ + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + audwma_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(buf_node); + } + } + } else { + /* read */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { + kfree(buf_node); + return -EINVAL; + } + /* No EOS reached */ + if (!audio->eos_rsp) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwma_async_read(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->in_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + /* EOS reached at input side fake all upcoming read buffer to + indicate the same */ + else { + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + event_payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + pr_debug("%s: propagate READ_DONE as EOS done\n",\ + __func__); + audwma_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(buf_node); + } + } + return 0; +} + +/* TBD: Only useful in tunnel-mode */ +int audwma_async_fsync(struct q6audio *audio) +{ + int rc = 0; + + /* Blocking client sends more data */ + mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + pr_aud_info("%s:\n", __func__); + + mutex_lock(&audio->write_lock); + audio->eos_rsp = 0; + + rc = wait_event_interruptible(audio->write_wait, + (list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped); + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + + rc = wait_event_interruptible(audio->write_wait, + (audio->eos_rsp || audio->wflush || + audio->stopped)); + + if (rc < 0) { + pr_aud_err("%s: wait event for eos_rsp failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->eos_rsp == 1) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 
1; + } + } + + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + return rc; +} + +int audwma_fsync(struct file *file, int datasync) +{ + struct q6audio *audio = file->private_data; + + if (!audio->enabled || audio->feedback) + return -EINVAL; + + return audio->drv_ops.fsync(audio); +} + +static void audwma_reset_pmem_region(struct q6audio *audio) +{ + struct audwma_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwma_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t audwma_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audwma_debug_read(struct file *file, char __user * buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct q6audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "enabled %d\n", audio->enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "feedback %d\n", audio->feedback); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "rflush %d\n", audio->rflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "inqueue empty %d\n", list_empty(&audio->in_queue)); + n += scnprintf(buffer + n, debug_bufmax - n, + "outqueue empty %d\n", list_empty(&audio->out_queue)); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audwma_debug_fops = { + .read = audwma_debug_read, + .open = audwma_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("AUDIO_GET_EVENT\n"); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audwma_process_event_req(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ASYNC_WRITE) { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audwma_aio_buf_add(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + return rc; + } + + if (cmd == AUDIO_ASYNC_READ) { + mutex_lock(&audio->read_lock); + if ((audio->feedback) && (audio->enabled)) + rc = audwma_aio_buf_add(audio, 0, + (void __user *)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + 
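+ /* Flag the abort before waking the waiter so a reader blocked in
+  * AUDIO_GET_EVENT observes event_abort and returns -ENODEV.
+  */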
wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: { + struct asm_wma_cfg wma_cfg; + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("pcm output block config failed\n"); + break; + } + } + wma_cfg.format_tag = audio->wma_config.format_tag; + wma_cfg.ch_cfg = audio->wma_config.numchannels; + wma_cfg.sample_rate = audio->wma_config.samplingrate; + wma_cfg.avg_bytes_per_sec = audio->wma_config.avgbytespersecond; + wma_cfg.block_align = audio->wma_config.block_align; + wma_cfg.valid_bits_per_sample = + audio->wma_config.validbitspersample; + wma_cfg.ch_mask = audio->wma_config.channelmask; + wma_cfg.encode_opt = audio->wma_config.encodeopt; + /* Configure Media format block */ + rc = q6asm_media_format_block_wma(audio->ac, &wma_cfg); + if (rc < 0) { + pr_aud_err("cmd media format block failed\n"); + break; + } + rc = audwma_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_STOP: { + pr_debug("AUDIO_STOP\n"); + audio->stopped = 1; + audwma_flush(audio); + audio->enabled = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + if (rc < 0) { + pr_aud_err("Audio Stop procedure failed rc=%d\n", rc); + break; + } + break; + } + case AUDIO_PAUSE: { + pr_debug("AUDIO_PAUSE %ld\n", arg); + if (arg == 1) { + rc = audwma_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audwma_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + } + break; + } + case AUDIO_FLUSH: { + pr_debug("AUDIO_FLUSH\n"); + audio->rflush = 1; + audio->wflush = 1; + /* Flush DSP */ + rc = audwma_flush(audio); + /* Flush input / Output buffer in software*/ + audwma_ioport_reset(audio); + if (rc < 0) { + pr_aud_err("AUDIO_FLUSH interrupted\n"); + rc = -EINTR; + } else { + audio->rflush = 0; + audio->wflush = 0; + } + audio->eos_flag = 0; + audio->eos_rsp = 0; + break; + } + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_REGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwma_pmem_add(audio, &info); + break; + } + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwma_pmem_remove(audio, &info); + break; + } + case AUDIO_GET_WMA_CONFIG_V2: { + if (copy_to_user((void *)arg, &audio->wma_config, + sizeof(struct msm_audio_wma_config_v2))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMA_CONFIG_V2: { + if (copy_from_user(&audio->wma_config, (void *)arg, + sizeof(struct msm_audio_wma_config_v2))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + 
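+ /* The stream configuration is fixed to FRAME_SIZE/FRAME_NUM at open
+  * time; AUDIO_SET_STREAM_CONFIG below reports success but keeps the
+  * driver defaults rather than the caller-supplied values.
+  */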
pr_debug("GET STREAM CFG %d %d\n", cfg.buffer_size, + cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + pr_debug("SET STREAM CONFIG\n"); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("Not sufficient permission to" + "change the playback mode\n"); + rc = -EACCES; + break; + } + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]", __func__, + audio->ac->session, cfg.meta_info_enable); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *)arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct q6audio *audio = file->private_data; + mutex_lock(&audio->lock); + audwma_disable(audio); + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + audwma_reset_pmem_region(audio); + audio->event_abort = 1; + wake_up(&audio->event_wait); + audwma_reset_event_queue(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + mutex_destroy(&audio->lock); + mutex_destroy(&audio->read_lock); + mutex_destroy(&audio->write_lock); + mutex_destroy(&audio->get_event_lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + pr_aud_info("%s: wma_decoder success\n", __func__); + return 0; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio *audio = NULL; + int rc = 0; + int i; + struct audwma_event *e_node = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wma_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("Could not allocate memory for wma decode driver\n"); + return -ENOMEM; + } + + /* Settings will 
be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 48000; + audio->pcm_cfg.channel_count = 2; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audwma_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + /* Only AIO interface */ + if (file->f_flags & O_NONBLOCK) { + pr_debug("set to aio interface\n"); + audio->drv_status |= ADRV_STATUS_AIO_INTF; + audio->drv_ops.out_flush = audwma_async_out_flush; + audio->drv_ops.in_flush = audwma_async_in_flush; + audio->drv_ops.fsync = audwma_async_fsync; + q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + } else { + pr_aud_err("SIO interface not supported\n"); + rc = -EACCES; + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V9); + if (rc < 0) { + pr_aud_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V9); + if (rc < 0) { + pr_aud_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_aud_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + spin_lock_init(&audio->event_queue_lock); + init_waitqueue_head(&audio->cmd_wait); + init_waitqueue_head(&audio->write_wait); + init_waitqueue_head(&audio->event_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->in_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + + audio->drv_ops.out_flush(audio); + audio->opened = 1; + file->private_data = audio; + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wma_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audwma_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + for (i = 0; i < AUDWMA_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audwma_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("event pkt alloc failed\n"); + break; + } + } + pr_aud_info("%s:wma decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wma_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audwma_fsync, +}; + +struct miscdevice audwma_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_wma", + .fops = &audio_wma_fops, +}; + +static int __init audio_wma_init(void) +{ 
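+ /* Register the "msm_wma" misc device (dynamic minor, typically
+  * surfaced as /dev/msm_wma); all access goes through the AIO
+  * ioctl/fsync interface wired up in audio_wma_fops above.
+  */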
+ return misc_register(&audwma_misc); +} + +device_initcall(audio_wma_init); diff --git a/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c b/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c new file mode 100644 index 00000000000..d0e67af8eab --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/audio_wmapro.c @@ -0,0 +1,1644 @@ +/* wmapro audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */ +#define ADRV_STATUS_FSYNC 0x00000008 +#define ADRV_STATUS_PAUSE 0x00000010 + +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 +#define AUDWMAPRO_EOS_SET 0x00000001 + +/* Default number of pre-allocated event packets */ +#define AUDWMAPRO_EVENT_NUM 10 + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct timestamp { + unsigned long lowpart; + unsigned long highpart; +} __attribute__ ((packed)); + +struct meta_in { + unsigned char reserved[18]; + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __attribute__ ((packed)); + +struct meta_out_dsp{ + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __attribute__ ((packed)); + +struct dec_meta_out{ + unsigned int reserved[7]; + unsigned int num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __attribute__ ((packed)); + +/* General meta field to store meta info +locally */ +union meta_data { + struct dec_meta_out meta_out; + struct meta_in meta_in; +} __attribute__ ((packed)); + +struct audwmapro_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audwmapro_pmem_region { + struct list_head list; + struct file *file; + int fd; + void *vaddr; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audwmapro_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; + unsigned long token; + void *kvaddr; + union meta_data meta_info; +}; + +struct q6audio; + +struct audwmapro_drv_operations { + void (*out_flush) (struct q6audio *); + void (*in_flush) (struct q6audio *); + int 
(*fsync)(struct q6audio *); +}; + +#define PCM_BUF_COUNT (2) +/* Buffer with meta */ +#define PCM_BUFSZ_MIN ((4*1024) + sizeof(struct dec_meta_out)) + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (2) +#define FRAME_SIZE ((4*1024) + sizeof(struct meta_in)) + +struct q6audio { + atomic_t in_bytes; + atomic_t in_samples; + + struct msm_audio_stream_config str_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + struct msm_audio_wmapro_config wmapro_config; + + struct audio_client *ac; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + struct mutex get_event_lock; + wait_queue_head_t cmd_wait; + wait_queue_head_t write_wait; + wait_queue_head_t event_wait; + spinlock_t dsp_lock; + spinlock_t event_queue_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct list_head out_queue; /* queue to retain output buffers */ + struct list_head in_queue; /* queue to retain input buffers */ + struct list_head free_event_queue; + struct list_head event_queue; + struct list_head pmem_region_queue; /* protected by lock */ + struct audwmapro_drv_operations drv_ops; + union msm_audio_event_payload eos_write_payload; + + uint32_t drv_status; + int event_abort; + int eos_rsp; + int eos_flag; + int opened; + int enabled; + int stopped; + int feedback; + int rflush; /* Read flush */ + int wflush; /* Write flush */ +}; + +static int insert_eos_buf(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) { + struct dec_meta_out *eos_buf = buf_node->kvaddr; + eos_buf->num_of_frames = 0xFFFFFFFF; + eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; + eos_buf->meta_out_dsp[0].nflags = AUDWMAPRO_EOS_SET; + return sizeof(struct dec_meta_out) + + sizeof(eos_buf->meta_out_dsp[0]); +} + +/* Routine which updates read buffers of driver/dsp, + for flush operation as DSP output might not have proper + value set */ +static int insert_meta_data(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) { + struct dec_meta_out *meta_data = buf_node->kvaddr; + meta_data->num_of_frames = 0x0; + meta_data->meta_out_dsp[0].offset_to_frame = 0x0; + meta_data->meta_out_dsp[0].nflags = 0x0; + return sizeof(struct dec_meta_out) + + sizeof(meta_data->meta_out_dsp[0]); +} + +static void extract_meta_info(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node, int dir) +{ + if (dir) { /* Read */ + if (audio->buf_cfg.meta_info_enable) + memcpy(&buf_node->meta_info.meta_in, + (char *)buf_node->kvaddr, sizeof(struct meta_in)); + else + memset(&buf_node->meta_info.meta_in, + 0, sizeof(struct meta_in)); + pr_debug("i/p: msw_ts 0x%lx lsw_ts 0x%lx nflags 0x%8x\n", + buf_node->meta_info.meta_in.ntimestamp.highpart, + buf_node->meta_info.meta_in.ntimestamp.lowpart, + buf_node->meta_info.meta_in.nflags); + } else { /* Write */ + memcpy((char *)buf_node->kvaddr, + &buf_node->meta_info.meta_out, + sizeof(struct dec_meta_out)); + pr_debug("o/p: msw_ts 0x%8x lsw_ts 0x%8x nflags 0x%8x\n", + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].msw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].lsw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].nflags); + } +} + +static int audwmapro_pmem_lookup_vaddr(struct q6audio *audio, void *addr, + unsigned long len, struct audwmapro_pmem_region **region) +{ + struct audwmapro_pmem_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if 
(addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) { + /* offset since we could pass vaddr inside a registerd + * pmem buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_aud_err("multiple hits for vaddr %p, len %ld\n", addr, len); + list_for_each_entry(region_elt, &audio->pmem_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len) + pr_aud_err("\t%p, %ld --> %p\n", region_elt->vaddr, + region_elt->len, + (void *)region_elt->paddr); + } + } + + return *region ? 0 : -1; +} + +static unsigned long audwmapro_pmem_fixup(struct q6audio *audio, void *addr, + unsigned long len, int ref_up, void **kvaddr) +{ + struct audwmapro_pmem_region *region; + unsigned long paddr; + int ret; + + ret = audwmapro_pmem_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_aud_err("lookup (%p, %ld) failed\n", addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + pr_debug("found region %p ref_cnt %d\n", region, region->ref_cnt); + paddr = region->paddr + (addr - region->vaddr); + /* provide kernel virtual address for accessing meta information */ + if (kvaddr) + *kvaddr = (void *) (region->kvaddr + (addr - region->vaddr)); + return paddr; +} + +static void audwmapro_post_event(struct q6audio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audwmapro_event *e_node = NULL; + unsigned long flags; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audwmapro_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audwmapro_event), GFP_ATOMIC); + if (!e_node) { + pr_aud_err("No mem to post event %d\n", type); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + wake_up(&audio->event_wait); +} + +static int audwmapro_enable(struct q6audio *audio) +{ + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +static int audwmapro_disable(struct q6audio *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s: inbytes[%d] insamples[%d]\n", __func__, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + /* Close the session */ + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_aud_err("Failed to close the session rc=%d\n", rc); + audio->stopped = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + pr_debug("enabled[%d]\n", audio->enabled); + return rc; +} + +static int audwmapro_pause(struct q6audio *audio) +{ + int rc = 0; + + pr_aud_info("%s, enabled = %d\n", __func__, + audio->enabled); + if (audio->enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, rc); + + } else + pr_aud_err("%s: Driver not enabled\n", __func__); + return rc; +} + +static int audwmapro_flush(struct q6audio *audio) +{ + int rc; + + if (audio->enabled) { + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & 
ADRV_STATUS_PAUSE)) { + rc = audwmapro_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause cmd failed rc=%d\n", __func__, + rc); + else + audio->drv_status |= ADRV_STATUS_PAUSE; + } + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_aud_err("%s: flush cmd failed rc=%d\n", __func__, rc); + /* Not in stop state, reenable the stream */ + if (audio->stopped == 0) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s:audio re-enable failed\n", __func__); + else { + audio->enabled = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + } + pr_debug("in_bytes %d\n", atomic_read(&audio->in_bytes)); + pr_debug("in_samples %d\n", atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + return 0; +} + +static void audwmapro_async_read(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) +{ + struct audio_client *ac; + struct audio_aio_read_param param; + int rc; + + pr_debug("%s: Send read buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.buf_len); + ac = audio->ac; + /* Provide address so driver can append nr frames information */ + param.paddr = buf_node->paddr + + sizeof(struct dec_meta_out); + param.len = buf_node->buf.buf_len - + sizeof(struct dec_meta_out); + param.uid = param.paddr; + /* Write command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_read(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +static void audwmapro_async_write(struct q6audio *audio, + struct audwmapro_buffer_node *buf_node) +{ + int rc; + struct audio_client *ac; + struct audio_aio_write_param param; + + pr_debug("%s: Send write buff %p phy %lx len %d\n", __func__, buf_node, + buf_node->paddr, buf_node->buf.data_len); + + ac = audio->ac; + /* Offset with appropriate meta */ + param.paddr = buf_node->paddr + sizeof(struct meta_in); + param.len = buf_node->buf.data_len - sizeof(struct meta_in); + param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart; + param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart; + /* If no meta_info enaled, indicate no time stamp valid */ + if (audio->buf_cfg.meta_info_enable) + param.flags = 0; + else + param.flags = 0xFF00; + param.uid = param.paddr; + /* Read command will populate paddr as token */ + buf_node->token = param.paddr; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_aud_err("%s:failed\n", __func__); +} + +/* Write buffer to DSP / Handle Ack from DSP */ +static void audwmapro_async_write_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwmapro_buffer_node *used_buf; + + /* No active flush in progress */ + if (audio->wflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->out_queue)); + used_buf = list_first_entry(&audio->out_queue, + struct audwmapro_buffer_node, list); + if (token == used_buf->token) { + list_del(&used_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_debug("consumed buffer\n"); + event_payload.aio_buf = used_buf->buf; + audwmapro_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(used_buf); + if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s: list is empty, reached EOS in\ + Tunnel\n", __func__); + wake_up(&audio->write_wait); + } + } else { + pr_aud_err("expected=%lx ret=%x\n", used_buf->token, token); + 
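+ /* Out-of-order ack: write acks are expected in out_queue order, so
+  * the head buffer is left queued and only the DSP lock is released
+  * in this path. */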
spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +/* Read buffer from DSP / Handle Ack from DSP */ +static void audwmapro_async_read_ack(struct q6audio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audwmapro_buffer_node *filled_buf; + + /* No active flush in progress */ + if (audio->rflush) + return; + + /* Statistics of read */ + atomic_add(payload[2], &audio->in_bytes); + atomic_add(payload[7], &audio->in_samples); + + spin_lock_irqsave(&audio->dsp_lock, flags); + BUG_ON(list_empty(&audio->in_queue)); + filled_buf = list_first_entry(&audio->in_queue, + struct audwmapro_buffer_node, list); + if (token == (filled_buf->token)) { + list_del(&filled_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + event_payload.aio_buf = filled_buf->buf; + /* Read done Buffer due to flush/normal condition + after EOS event, so append EOS buffer */ + if (audio->eos_rsp == 0x1) { + event_payload.aio_buf.data_len = + insert_eos_buf(audio, filled_buf); + /* Reset flag back to indicate eos intimated */ + audio->eos_rsp = 0; + } else { + filled_buf->meta_info.meta_out.num_of_frames = + payload[7]; + pr_debug("nr of frames 0x%8x\n", + filled_buf->meta_info.meta_out.num_of_frames); + event_payload.aio_buf.data_len = payload[2] + \ + payload[3] + \ + sizeof(struct dec_meta_out); + extract_meta_info(audio, filled_buf, 0); + audio->eos_rsp = 0; + } + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(filled_buf); + } else { + pr_aud_err("expected=%lx ret=%x\n", filled_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +static void q6_audwmapro_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio *audio = (struct q6audio *)priv; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + pr_debug("%s:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + __func__, token); + audwmapro_async_write_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_READ_DONE: + pr_debug("%s:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + __func__, token); + audwmapro_async_read_ack(audio, token, payload); + break; + case ASM_DATA_CMDRSP_EOS: + /* EOS Handle */ + pr_debug("%s:ASM_DATA_CMDRSP_EOS\n", __func__); + if (audio->feedback) { /* Non-Tunnel mode */ + audio->eos_rsp = 1; + /* propagate input EOS i/p buffer, + after receiving DSP acknowledgement */ + if (audio->eos_flag && + (audio->eos_write_payload.aio_buf.buf_addr)) { + audwmapro_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + audio->eos_flag = 0; + } + } else { /* Tunnel mode */ + audio->eos_rsp = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + break; + default: + pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); + break; + } +} + +/* ------------------- device --------------------- */ +static void audwmapro_async_out_flush(struct q6audio *audio) +{ + struct audwmapro_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + unsigned long flags; + + pr_debug("%s\n", __func__); + /* EOS followed by flush, EOS response not guranteed, free EOS i/p + buffer */ + spin_lock_irqsave(&audio->dsp_lock, flags); + if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { + pr_debug("%s: EOS followed by flush received,acknowledge eos"\ + " i/p buffer immediately\n", __func__); + audwmapro_post_event(audio, 
AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audwmapro_buffer_node, list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audwmapro_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate WRITE_DONE during flush\n", __func__); + } +} + +static void audwmapro_async_in_flush(struct q6audio *audio) +{ + struct audwmapro_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + + pr_debug("%s\n", __func__); + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audwmapro_buffer_node, list); + list_del(&buf_node->list); + /* Forcefull send o/p eos buffer after flush, if no eos response + * received by dsp even after sending eos command */ + if ((audio->eos_rsp != 1) && audio->eos_flag) { + pr_debug("%s: send eos on o/p buffer during flush\n",\ + __func__); + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + audio->eos_flag = 0; + } else { + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_meta_data(audio, buf_node); + } + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, payload); + kfree(buf_node); + pr_debug("%s: Propagate READ_DONE during flush\n", __func__); + } +} + +static void audwmapro_ioport_reset(struct q6audio *audio) +{ + if (audio->drv_status & ADRV_STATUS_AIO_INTF) { + /* If fsync is in progress, make sure + * return value of fsync indicates + * abort due to flush + */ + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("fsync in progress\n"); + audio->drv_ops.out_flush(audio); + } else + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + } +} + +static int audwmapro_events_pending(struct q6audio *audio) +{ + unsigned long flags; + int empty; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + empty = !list_empty(&audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return empty || audio->event_abort; +} + +static void audwmapro_reset_event_queue(struct q6audio *audio) +{ + unsigned long flags; + struct audwmapro_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + return; +} + +static long audwmapro_process_event_req(struct q6audio *audio, + void __user *arg) +{ + long rc; + struct msm_audio_event usr_evt; + struct audwmapro_event *drv_evt = NULL; + int timeout; + unsigned long flags; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) + return -EFAULT; + + timeout = (int)usr_evt.timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout(audio->event_wait, + audwmapro_events_pending + (audio), + msecs_to_jiffies + (timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible(audio->event_wait, + audwmapro_events_pending(audio)); + } + + 
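+ /* A negative rc means the interruptible wait was broken by a signal
+  * (-ERESTARTSYS); propagate it so the ioctl can be restarted. */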
if (rc < 0) + return rc; + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audwmapro_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt.event_type = drv_evt->event_type; + usr_evt.event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + pr_aud_err("Unexpected path\n"); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { + pr_debug("posted AUDIO_EVENT_WRITE_DONE to user\n"); + mutex_lock(&audio->write_lock); + audwmapro_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->write_lock); + } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("posted AUDIO_EVENT_READ_DONE to user\n"); + mutex_lock(&audio->read_lock); + audwmapro_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->read_lock); + } + + /* Some read buffer might be held up in DSP,release all + Once EOS indicated*/ + if (audio->eos_rsp && !list_empty(&audio->in_queue)) { + pr_debug("Send flush command to release read buffers"\ + " held up in DSP\n"); + audwmapro_flush(audio); + } + + if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) + rc = -EFAULT; + + return rc; +} + +static int audwmapro_pmem_check(struct q6audio *audio, + void *vaddr, unsigned long len) +{ + struct audwmapro_pmem_region *region_elt; + struct audwmapro_pmem_region t = {.vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->pmem_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_aud_err("region (vaddr %p len %ld)" + " clashes with registered region" + " (vaddr %p paddr %p len %ld)\n", + vaddr, len, + region_elt->vaddr, + (void *)region_elt->paddr, region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audwmapro_pmem_add(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + unsigned long paddr, kvaddr, len; + struct file *file; + struct audwmapro_pmem_region *region; + int rc = -EINVAL; + + pr_debug("%s\n", __func__); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) { + kfree(region); + goto end; + } + + rc = audwmapro_pmem_check(audio, info->vaddr, len); + if (rc < 0) { + put_pmem_file(file); + kfree(region); + goto end; + } + + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->file = file; + region->ref_cnt = 0; + pr_debug("add region paddr %lx vaddr %p, len %lu kvaddr %lx\n", + region->paddr, region->vaddr, region->len, region->kvaddr); + list_add_tail(®ion->list, &audio->pmem_region_queue); + + rc = q6asm_memory_map(audio->ac, (uint32_t) paddr, IN, (uint32_t) len, + 1); + if (rc < 0) + pr_aud_err("%s: memory map failed\n", __func__); +end: + return rc; +} + +static int audwmapro_pmem_remove(struct q6audio *audio, + struct msm_audio_pmem_info *info) +{ + struct audwmapro_pmem_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("info fd %d vaddr %p\n", info->fd, 
info->vaddr); + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwmapro_pmem_region, list); + + if ((region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("region %p in use ref_cnt %d\n", + region, region->ref_cnt); + break; + } + pr_debug("remove region fd %d vaddr %p\n", + info->fd, info->vaddr); + rc = q6asm_memory_unmap(audio->ac, + (uint32_t) region->paddr, IN); + if (rc < 0) + pr_aud_err("%s: memory unmap failed\n", __func__); + + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +/* audio -> lock must be held at this point */ +static int audwmapro_aio_buf_add(struct q6audio *audio, unsigned dir, + void __user *arg) +{ + unsigned long flags; + struct audwmapro_buffer_node *buf_node; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) + return -ENOMEM; + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + return -EFAULT; + } + + pr_debug("node %p dir %x buf_addr %p buf_len %d data_len \ + %d\n", buf_node, dir, buf_node->buf.buf_addr, + buf_node->buf.buf_len, buf_node->buf.data_len); + + buf_node->paddr = audwmapro_pmem_fixup(audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1, + &buf_node->kvaddr); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (!audio->feedback && !buf_node->buf.data_len)) { + kfree(buf_node); + return -EINVAL; + } + extract_meta_info(audio, buf_node, 1); + /* Not a EOS buffer */ + if (!(buf_node->meta_info.meta_in.nflags & AUDWMAPRO_EOS_SET)) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwmapro_async_write(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->out_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + if (buf_node->meta_info.meta_in.nflags & AUDWMAPRO_EOS_SET) { + if (!audio->wflush) { + pr_debug("%s:Send EOS cmd at i/p\n", __func__); + /* Driver will forcefully post writedone event + once eos ack recived from DSP*/ + audio->eos_write_payload.aio_buf =\ + buf_node->buf; + audio->eos_flag = 1; + audio->eos_rsp = 0; + q6asm_cmd(audio->ac, CMD_EOS); + kfree(buf_node); + } else { /* Flush in progress, send back i/p EOS buffer + as is */ + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + audwmapro_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(buf_node); + } + } + } else { + /* read */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { + kfree(buf_node); + return -EINVAL; + } + /* No EOS reached */ + if (!audio->eos_rsp) { + spin_lock_irqsave(&audio->dsp_lock, flags); + audwmapro_async_read(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->in_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + /* EOS reached at input side fake all upcoming read buffer to + indicate the same */ + else { + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + event_payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + pr_debug("%s: propagate READ_DONE as EOS done\n",\ + __func__); + audwmapro_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(buf_node); + } + } + return 0; +} + +/* TBD: Only useful in tunnel-mode */ +int audwmapro_async_fsync(struct q6audio *audio) +{ + int rc = 0; + + /* Blocking client sends more data */ + 
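+ /* While ADRV_STATUS_FSYNC is set, AUDIO_ASYNC_WRITE in audio_ioctl()
+  * fails with -EBUSY, so no further buffers are queued while the
+  * outstanding writes drain below. */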
mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + pr_aud_info("%s:\n", __func__); + + mutex_lock(&audio->write_lock); + audio->eos_rsp = 0; + + rc = wait_event_interruptible(audio->write_wait, + (list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped); + + if (rc < 0) { + pr_aud_err("%s: wait event for list_empty failed, rc = %d\n", + __func__, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + + if (rc < 0) + pr_aud_err("%s: q6asm_cmd failed, rc = %d", __func__, rc); + + rc = wait_event_interruptible(audio->write_wait, + (audio->eos_rsp || audio->wflush || + audio->stopped)); + + if (rc < 0) { + pr_aud_err("%s: wait event for eos_rsp failed, rc = %d\n", __func__, + rc); + goto done; + } + + if (audio->eos_rsp == 1) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + + if (audio->stopped || audio->wflush) + rc = -EBUSY; + +done: + mutex_unlock(&audio->write_lock); + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + return rc; +} + +int audwmapro_fsync(struct file *file, int datasync) +{ + struct q6audio *audio = file->private_data; + + if (!audio->enabled || audio->feedback) + return -EINVAL; + + return audio->drv_ops.fsync(audio); +} + +static void audwmapro_reset_pmem_region(struct q6audio *audio) +{ + struct audwmapro_pmem_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->pmem_region_queue) { + region = list_entry(ptr, struct audwmapro_pmem_region, list); + list_del(®ion->list); + put_pmem_file(region->file); + kfree(region); + } + + return; +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t audwmapro_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t audwmapro_debug_read(struct file *file, char __user * buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct q6audio *audio = file->private_data; + + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "enabled %d\n", audio->enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "feedback %d\n", audio->feedback); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. 
Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "rflush %d\n", audio->rflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "inqueue empty %d\n", list_empty(&audio->in_queue)); + n += scnprintf(buffer + n, debug_bufmax - n, + "outqueue empty %d\n", list_empty(&audio->out_queue)); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} + +static const struct file_operations audwmapro_debug_fops = { + .read = audwmapro_debug_read, + .open = audwmapro_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + if (cmd == AUDIO_GET_EVENT) { + pr_debug("AUDIO_GET_EVENT\n"); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audwmapro_process_event_req(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + return rc; + } + + if (cmd == AUDIO_ASYNC_WRITE) { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audwmapro_aio_buf_add(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + return rc; + } + + if (cmd == AUDIO_ASYNC_READ) { + mutex_lock(&audio->read_lock); + if ((audio->feedback) && (audio->enabled)) + rc = audwmapro_aio_buf_add(audio, 0, + (void __user *)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + return rc; + } + + if (cmd == AUDIO_ABORT_GET_EVENT) { + audio->event_abort = 1; + wake_up(&audio->event_wait); + return 0; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: { + struct asm_wmapro_cfg wmapro_cfg; + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_aud_err("pcm output block config failed\n"); + break; + } + } + if ((audio->wmapro_config.formattag == 0x162) || + (audio->wmapro_config.formattag == 0x166)) { + wmapro_cfg.format_tag = audio->wmapro_config.formattag; + } else { + pr_aud_err("%s:AUDIO_START failed: formattag = %d\n", + __func__, audio->wmapro_config.formattag); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.numchannels == 1) || + (audio->wmapro_config.numchannels == 2)) { + wmapro_cfg.ch_cfg = audio->wmapro_config.numchannels; + } else { + pr_aud_err("%s:AUDIO_START failed: channels = %d\n", + __func__, audio->wmapro_config.numchannels); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.samplingrate <= 48000) || + (audio->wmapro_config.samplingrate > 0)) { + wmapro_cfg.sample_rate = + audio->wmapro_config.samplingrate; + } else { + pr_aud_err("%s:AUDIO_START failed: sample_rate = %d\n", + __func__, audio->wmapro_config.samplingrate); + rc = -EINVAL; + break; + } + wmapro_cfg.avg_bytes_per_sec = + audio->wmapro_config.avgbytespersecond; + if ((audio->wmapro_config.asfpacketlength <= 13376) || + (audio->wmapro_config.asfpacketlength > 0)) { + wmapro_cfg.block_align = + audio->wmapro_config.asfpacketlength; + } else { + pr_aud_err("%s:AUDIO_START failed: block_align = %d\n", + 
__func__, audio->wmapro_config.asfpacketlength); + rc = -EINVAL; + break; + } + if (audio->wmapro_config.validbitspersample == 16) { + wmapro_cfg.valid_bits_per_sample = + audio->wmapro_config.validbitspersample; + } else { + pr_aud_err("%s:AUDIO_START failed: bitspersample = %d\n", + __func__, + audio->wmapro_config.validbitspersample); + rc = -EINVAL; + break; + } + if ((audio->wmapro_config.channelmask == 4) || + (audio->wmapro_config.channelmask == 3)) { + wmapro_cfg.ch_mask = audio->wmapro_config.channelmask; + } else { + pr_aud_err("%s:AUDIO_START failed: channel_mask = %d\n", + __func__, audio->wmapro_config.channelmask); + rc = -EINVAL; + break; + } + wmapro_cfg.encode_opt = audio->wmapro_config.encodeopt; + wmapro_cfg.adv_encode_opt = + audio->wmapro_config.advancedencodeopt; + wmapro_cfg.adv_encode_opt2 = + audio->wmapro_config.advancedencodeopt2; + /* Configure Media format block */ + rc = q6asm_media_format_block_wmapro(audio->ac, &wmapro_cfg); + if (rc < 0) { + pr_aud_err("cmd media format block failed\n"); + break; + } + rc = audwmapro_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_STOP: { + pr_debug("AUDIO_STOP\n"); + audio->stopped = 1; + audwmapro_flush(audio); + audio->enabled = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + if (rc < 0) { + pr_aud_err("Audio Stop procedure failed rc=%d\n", rc); + break; + } + break; + } + case AUDIO_PAUSE: { + pr_debug("AUDIO_PAUSE %ld\n", arg); + if (arg == 1) { + rc = audwmapro_pause(audio); + if (rc < 0) + pr_aud_err("%s: pause FAILED rc=%d\n", __func__, + rc); + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audwmapro_enable(audio); + if (rc) + pr_aud_err("%s: audio enable failed\n", + __func__); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + } + break; + } + case AUDIO_FLUSH: { + pr_debug("AUDIO_FLUSH\n"); + audio->rflush = 1; + audio->wflush = 1; + /* Flush DSP */ + rc = audwmapro_flush(audio); + /* Flush input / Output buffer in software*/ + audwmapro_ioport_reset(audio); + if (rc < 0) { + pr_aud_err("AUDIO_FLUSH interrupted\n"); + rc = -EINTR; + } else { + audio->rflush = 0; + audio->wflush = 0; + } + audio->eos_flag = 0; + audio->eos_rsp = 0; + break; + } + case AUDIO_REGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_REGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwmapro_pmem_add(audio, &info); + break; + } + case AUDIO_DEREGISTER_PMEM: { + struct msm_audio_pmem_info info; + pr_debug("AUDIO_DEREGISTER_PMEM\n"); + if (copy_from_user(&info, (void *)arg, sizeof(info))) + rc = -EFAULT; + else + rc = audwmapro_pmem_remove(audio, &info); + break; + } + case AUDIO_GET_WMAPRO_CONFIG: { + if (copy_to_user((void *)arg, &audio->wmapro_config, + sizeof(struct msm_audio_wmapro_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMAPRO_CONFIG: { + if (copy_from_user(&audio->wmapro_config, (void *)arg, + sizeof(struct msm_audio_wmapro_config))) { + rc = -EFAULT; + break; + } + break; + } + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + 
cfg.buffer_count = audio->str_cfg.buffer_count; + pr_debug("GET STREAM CFG %d %d\n", cfg.buffer_size, + cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + pr_debug("SET STREAM CONFIG\n"); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) + rc = -EFAULT; + break; + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + if (copy_from_user(&config, (void *)arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_aud_err("Not sufficient permission to" + "change the playback mode\n"); + rc = -EACCES; + break; + } + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d]", __func__, + audio->ac->session, cfg.meta_info_enable); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d]\ + framesperbuf[%d]\n", __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *)arg, &audio->ac->session, + sizeof(unsigned short))) { + rc = -EFAULT; + } + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&audio->lock); + return rc; +} + +static int audio_release(struct inode *inode, struct file *file) +{ + struct q6audio *audio = file->private_data; + mutex_lock(&audio->lock); + audwmapro_disable(audio); + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + audwmapro_reset_pmem_region(audio); + audio->event_abort = 1; + wake_up(&audio->event_wait); + audwmapro_reset_event_queue(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + mutex_destroy(&audio->lock); + mutex_destroy(&audio->read_lock); + mutex_destroy(&audio->write_lock); + mutex_destroy(&audio->get_event_lock); +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio); + pr_aud_info("%s: wmapro decoder success\n", __func__); + return 0; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio *audio = NULL; + int rc = 0; + int i; + struct audwmapro_event *e_node = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wmapro_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("Could not allocate memory 
for wma decode driver\n"); + return -ENOMEM; + } + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 48000; + audio->pcm_cfg.channel_count = 2; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audwmapro_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + /* Only AIO interface */ + if (file->f_flags & O_NONBLOCK) { + pr_debug("set to aio interface\n"); + audio->drv_status |= ADRV_STATUS_AIO_INTF; + audio->drv_ops.out_flush = audwmapro_async_out_flush; + audio->drv_ops.in_flush = audwmapro_async_in_flush; + audio->drv_ops.fsync = audwmapro_async_fsync; + q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + } else { + pr_aud_err("SIO interface not supported\n"); + rc = -EACCES; + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_aud_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_aud_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_aud_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + spin_lock_init(&audio->event_queue_lock); + init_waitqueue_head(&audio->cmd_wait); + init_waitqueue_head(&audio->write_wait); + init_waitqueue_head(&audio->event_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->in_queue); + INIT_LIST_HEAD(&audio->pmem_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + + audio->drv_ops.out_flush(audio); + audio->opened = 1; + file->private_data = audio; + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audwmapro_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + for (i = 0; i < AUDWMAPRO_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audwmapro_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_aud_err("event pkt alloc failed\n"); + break; + } + } + pr_aud_info("%s:wmapro decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wmapro_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audwmapro_fsync, +}; + +struct miscdevice audwmapro_misc = { + .minor = 
MISC_DYNAMIC_MINOR, + .name = "msm_wmapro", + .fops = &audio_wmapro_fops, +}; + +static int __init audio_wmapro_init(void) +{ + return misc_register(&audwmapro_misc); +} + +device_initcall(audio_wmapro_init); diff --git a/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c b/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c new file mode 100644 index 00000000000..00ba57e5aa0 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/board-msm8x60-audio.c @@ -0,0 +1,1901 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "timpani_profile_8x60.h" + +#ifdef CONFIG_MACH_VIGOR +#include "timpani_profile_8x60_vigor.h" +#else +#include "timpani_profile_8x60_lead.h" +#endif + +#include +#include "snddev_mi2s.h" +#include + +#include "snddev_virtual.h" + +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_hsed_config; +static void snddev_hsed_config_modify_setting(int type); +static void snddev_hsed_config_restore_setting(void); +#endif + +/* define the value for BT_SCO */ + + +#define SNDDEV_GPIO_MIC2_ANCR_SEL 294 + +static struct q6v2audio_analog_ops default_audio_ops; +static struct q6v2audio_analog_ops *audio_ops = &default_audio_ops; + +void speaker_enable(int en) +{ + if (audio_ops->speaker_enable) + audio_ops->speaker_enable(en); +} + +void headset_enable(int en) +{ + if (audio_ops->headset_enable) + audio_ops->headset_enable(en); +} + +void handset_enable(int en) +{ + if (audio_ops->handset_enable) + audio_ops->handset_enable(en); +} + +void headset_speaker_enable(int en) +{ + if (audio_ops->headset_speaker_enable) + audio_ops->headset_speaker_enable(en); +} + +void int_mic_enable(int en) +{ + if (audio_ops->int_mic_enable) + audio_ops->int_mic_enable(en); +} + +void back_mic_enable(int en) +{ + if (audio_ops->back_mic_enable) + audio_ops->back_mic_enable(en); +} + +void ext_mic_enable(int en) +{ + if (audio_ops->ext_mic_enable) + audio_ops->ext_mic_enable(en); +} + +void stereo_mic_enable(int en) +{ + if (audio_ops->stereo_mic_enable) + audio_ops->stereo_mic_enable(en); +} + +void usb_headset_enable(int en) +{ + if (audio_ops->usb_headset_enable) + audio_ops->usb_headset_enable(en); +} + +void fm_headset_enable(int en) +{ + if (audio_ops->fm_headset_enable) + audio_ops->fm_headset_enable(en); +} + +void fm_speaker_enable(int en) +{ + if (audio_ops->fm_speaker_enable) + audio_ops->fm_speaker_enable(en); +} + +void voltage_on(int on) +{ + if (audio_ops->voltage_on) + audio_ops->voltage_on(on); +} + +static struct regulator *s3; +static struct regulator *mvs; + +static void msm_snddev_enable_dmic_power(void) +{ + int ret; + + pr_aud_err("%s", __func__); + s3 = regulator_get(NULL, 
"8058_s3"); + if (IS_ERR(s3)) + return; + + ret = regulator_set_voltage(s3, 1800000, 1800000); + if (ret) { + pr_aud_err("%s: error setting voltage\n", __func__); + goto fail_s3; + } + + ret = regulator_enable(s3); + if (ret) { + pr_aud_err("%s: error enabling regulator\n", __func__); + goto fail_s3; + } + + mvs = regulator_get(NULL, "8901_mvs0"); + if (IS_ERR(mvs)) + goto fail_mvs0_get; + + ret = regulator_enable(mvs); + if (ret) { + pr_aud_err("%s: error setting regulator\n", __func__); + goto fail_mvs0_enable; + } + + stereo_mic_enable(1); + return; + +fail_mvs0_enable: + regulator_put(mvs); + mvs = NULL; +fail_mvs0_get: + regulator_disable(s3); +fail_s3: + regulator_put(s3); + s3 = NULL; +} + +static void msm_snddev_disable_dmic_power(void) +{ + int ret; + pr_aud_err("%s", __func__); + + if (mvs) { + ret = regulator_disable(mvs); + if (ret < 0) + pr_aud_err("%s: error disabling vreg mvs\n", __func__); + regulator_put(mvs); + mvs = NULL; + } + + if (s3) { + ret = regulator_disable(s3); + if (ret < 0) + pr_aud_err("%s: error disabling regulator s3\n", __func__); + regulator_put(s3); + s3 = NULL; + } + stereo_mic_enable(0); + +} + +static void msm_snddev_dmic_power(int en) +{ + pr_aud_err("%s", __func__); + if (en) + msm_snddev_enable_dmic_power(); + else + msm_snddev_disable_dmic_power(); +} + +/* GPIO_CLASS_D0_EN */ +#define SNDDEV_GPIO_CLASS_D0_EN 227 + +/* GPIO_CLASS_D1_EN */ +#define SNDDEV_GPIO_CLASS_D1_EN 229 + +#define PM8901_MPP_3 (2) /* PM8901 MPP starts from 0 */ +static void config_class_d1_gpio(int enable) +{ + int rc; + + if (enable) { + rc = gpio_request(SNDDEV_GPIO_CLASS_D1_EN, "CLASSD1_EN"); + if (rc) { + pr_aud_err("%s: spkr pamp gpio %d request" + "failed\n", __func__, SNDDEV_GPIO_CLASS_D1_EN); + return; + } + gpio_direction_output(SNDDEV_GPIO_CLASS_D1_EN, 1); + gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 1); + } else { + gpio_set_value_cansleep(SNDDEV_GPIO_CLASS_D1_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D1_EN); + } +} + +static void config_class_d0_gpio(int enable) +{ + int rc; + + if (enable) { + rc = pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 1); + + if (rc) { + pr_aud_err("%s: CLASS_D0_EN failed\n", __func__); + return; + } + + rc = gpio_request(SNDDEV_GPIO_CLASS_D0_EN, "CLASSD0_EN"); + + if (rc) { + pr_aud_err("%s: spkr pamp gpio pm8901 mpp3 request" + "failed\n", __func__); + pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 0); + return; + } + + gpio_direction_output(SNDDEV_GPIO_CLASS_D0_EN, 1); + gpio_set_value(SNDDEV_GPIO_CLASS_D0_EN, 1); + + } else { + pm8901_mpp_config_digital_out(PM8901_MPP_3, + PM8901_MPP_DIG_LEVEL_MSMIO, 0); + gpio_set_value(SNDDEV_GPIO_CLASS_D0_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D0_EN); + } +} + +void msm_snddev_poweramp_on(void) +{ + + pr_debug("%s: enable stereo spkr amp\n", __func__); + config_class_d0_gpio(1); + config_class_d1_gpio(1); +} + +void msm_snddev_poweramp_off(void) +{ + + pr_debug("%s: disable stereo spkr amp\n", __func__); + config_class_d0_gpio(0); + config_class_d1_gpio(0); + msleep(30); +} + + +static void msm_snddev_enable_dmic_sec_power(void) +{ + pr_aud_err("%s", __func__); + msm_snddev_enable_dmic_power(); + +#ifdef CONFIG_PMIC8058_OTHC + pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_ALWAYS_ON); +#endif +} + +static void msm_snddev_disable_dmic_sec_power(void) +{ + pr_aud_err("%s", __func__); + msm_snddev_disable_dmic_power(); + +#ifdef CONFIG_PMIC8058_OTHC + pm8058_micbias_enable(OTHC_MICBIAS_2, OTHC_SIGNAL_OFF); +#endif +} + +static void 
msm_snddev_dmic_sec_power(int en) +{ + pr_aud_err("%s", __func__); + if (en) + msm_snddev_enable_dmic_sec_power(); + else + msm_snddev_disable_dmic_sec_power(); +} + +static struct adie_codec_action_unit iearpiece_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iearpiece_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iearpiece_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iearpiece_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iearpiece_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iearpiece_settings, + .setting_sz = ARRAY_SIZE(iearpiece_settings), +}; + +static struct snddev_icodec_data snddev_iearpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "handset_rx", + .copp_id = 0, + .profile = &iearpiece_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_RECEIVER, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_iearpiece_device = { + .name = "snddev_icodec", + .id = 0, + .dev = { .platform_data = &snddev_iearpiece_data }, +}; + +static struct adie_codec_action_unit iearpiece_hac_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iearpiece_hac_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iearpiece_hac_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iearpiece_hac_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iearpiece_hac_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iearpiece_hac_settings, + .setting_sz = ARRAY_SIZE(iearpiece_hac_settings), +}; + +static struct snddev_icodec_data snddev_ihac_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "hac_rx", + .copp_id = 0, + .profile = &iearpiece_hac_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = HAC, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_ihac_device = { + .name = "snddev_icodec", + .id = 38, + .dev = { .platform_data = &snddev_ihac_data }, +}; + +static struct adie_codec_action_unit imic_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry imic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = imic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(imic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile imic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = imic_settings, + .setting_sz = ARRAY_SIZE(imic_settings), +}; + +static struct snddev_icodec_data snddev_imic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_tx", + .copp_id = 1, + .profile = &imic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECOGNITION_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECOGNITION_IMIC, +}; + +static struct platform_device msm_imic_device = { + .name = "snddev_icodec", + .id = 1, + .dev = { .platform_data = &snddev_imic_data }, +}; + +static struct snddev_icodec_data snddev_nomic_headset_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "nomic_headset_tx", + .copp_id = 1, + .profile = &imic_profile, + .channel_mode = 1, + 
.default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_HEADSET, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_nomic_headset_tx_device = { + .name = "snddev_icodec", + .id = 40, + .dev = { .platform_data = &snddev_nomic_headset_data }, +}; + +static struct adie_codec_action_unit headset_ab_cpls_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_ab_cpls_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_ab_cpls_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_ab_cpls_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_ab_cpls_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_ab_cpls_settings, + .setting_sz = ARRAY_SIZE(headset_ab_cpls_settings), +}; + +static struct snddev_icodec_data snddev_ihs_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_stereo_rx", + .copp_id = 0, + .profile = &headset_ab_cpls_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_stereo_device = { + .name = "snddev_icodec", + .id = 34, + .dev = { .platform_data = &snddev_ihs_stereo_rx_data }, +}; + +static struct snddev_icodec_data snddev_nomic_ihs_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "nomic_headset_stereo_rx", + .copp_id = 0, + .profile = &headset_ab_cpls_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_nomic_headset_stereo_device = { + .name = "snddev_icodec", + .id = 39, + .dev = { .platform_data = &snddev_nomic_ihs_stereo_rx_data }, +}; + +static struct adie_codec_action_unit headset_anc_48KHz_osr256_actions[] = + ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_anc_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_anc_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_anc_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_anc_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_anc_settings, + .setting_sz = ARRAY_SIZE(headset_anc_settings), +}; + +static struct snddev_icodec_data snddev_anc_headset_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_ANC), + .name = "anc_headset_stereo_rx", + .copp_id = PRIMARY_I2S_RX, + .profile = &headset_anc_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, +}; + +static struct platform_device msm_anc_headset_device = { + .name = "snddev_icodec", + .id = 51, + .dev = { .platform_data = &snddev_anc_headset_data }, +}; + +static struct adie_codec_action_unit ispkr_stereo_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_stereo_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_stereo_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_stereo_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_stereo_profile = { + 
.path_type = ADIE_CODEC_RX, + .settings = ispkr_stereo_settings, + .setting_sz = ARRAY_SIZE(ispkr_stereo_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_stereo_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_stereo_rx", + .copp_id = 0, + .profile = &ispkr_stereo_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_stereo_device = { + .name = "snddev_icodec", + .id = 2, + .dev = { .platform_data = &snddev_ispkr_stereo_data }, +}; + +static struct adie_codec_action_unit idmic_mono_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry idmic_mono_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = idmic_mono_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(idmic_mono_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile idmic_mono_profile = { + .path_type = ADIE_CODEC_TX, + .settings = idmic_mono_settings, + .setting_sz = ARRAY_SIZE(idmic_mono_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &idmic_mono_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_SPEAKER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_ispkr_mic_device = { + .name = "snddev_icodec", + .id = 3, + .dev = { .platform_data = &snddev_ispkr_mic_data }, +}; + +static struct adie_codec_action_unit handset_dual_mic_endfire_8KHz_osr256_actions[] = + DMIC1_PRI_STEREO_8000_OSR_256; + +static struct adie_codec_action_unit spk_dual_mic_endfire_8KHz_osr256_actions[] = + DMIC1_PRI_STEREO_8000_OSR_256; + +static struct adie_codec_hwsetting_entry handset_dual_mic_endfire_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = handset_dual_mic_endfire_8KHz_osr256_actions, + .action_sz = ARRAY_SIZE(handset_dual_mic_endfire_8KHz_osr256_actions), + } +}; + +static struct adie_codec_hwsetting_entry spk_dual_mic_endfire_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = spk_dual_mic_endfire_8KHz_osr256_actions, + .action_sz = ARRAY_SIZE(spk_dual_mic_endfire_8KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile handset_dual_mic_endfire_profile = { + .path_type = ADIE_CODEC_TX, + .settings = handset_dual_mic_endfire_settings, + .setting_sz = ARRAY_SIZE(handset_dual_mic_endfire_settings), +}; + +static struct adie_codec_dev_profile spk_dual_mic_endfire_profile = { + .path_type = ADIE_CODEC_TX, + .settings = spk_dual_mic_endfire_settings, + .setting_sz = ARRAY_SIZE(spk_dual_mic_endfire_settings), +}; + +static struct snddev_icodec_data snddev_dual_mic_endfire_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_dual_mic_endfire_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &handset_dual_mic_endfire_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_power, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_hs_dual_mic_endfire_device = { + .name = "snddev_icodec", + .id = 14, + 
.dev = { .platform_data = &snddev_dual_mic_endfire_data }, +}; + +static struct snddev_icodec_data snddev_dual_mic_spkr_endfire_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_dual_mic_endfire_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &spk_dual_mic_endfire_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_power, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_SPEAKER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_spkr_dual_mic_endfire_device = { + .name = "snddev_icodec", + .id = 15, + .dev = { .platform_data = &snddev_dual_mic_spkr_endfire_data }, +}; + +static struct adie_codec_action_unit dual_mic_broadside_8osr256_actions[] = + HS_DMIC2_STEREO_8000_OSR_256; + +static struct adie_codec_hwsetting_entry dual_mic_broadside_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = dual_mic_broadside_8osr256_actions, + .action_sz = ARRAY_SIZE(dual_mic_broadside_8osr256_actions), + } +}; + +static struct adie_codec_dev_profile dual_mic_broadside_profile = { + .path_type = ADIE_CODEC_TX, + .settings = dual_mic_broadside_settings, + .setting_sz = ARRAY_SIZE(dual_mic_broadside_settings), +}; + +static struct snddev_icodec_data snddev_hs_dual_mic_broadside_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "handset_dual_mic_broadside_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &dual_mic_broadside_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_sec_power, +}; + +static struct platform_device msm_hs_dual_mic_broadside_device = { + .name = "snddev_icodec", + .id = 21, + .dev = { .platform_data = &snddev_hs_dual_mic_broadside_data }, +}; + +static struct snddev_icodec_data snddev_spkr_dual_mic_broadside_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "speaker_dual_mic_broadside_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &dual_mic_broadside_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = msm_snddev_dmic_sec_power, +}; + +static struct platform_device msm_spkr_dual_mic_broadside_device = { + .name = "snddev_icodec", + .id = 18, + .dev = { .platform_data = &snddev_spkr_dual_mic_broadside_data }, +}; + +static struct snddev_hdmi_data snddev_hdmi_stereo_rx_data = { + .capability = SNDDEV_CAP_RX , + .name = "hdmi_stereo_rx", + .copp_id = HDMI_RX, + .channel_mode = 0, + .default_sample_rate = 48000, +}; + +static struct platform_device msm_snddev_hdmi_stereo_rx_device = { + .name = "snddev_hdmi", + .id = 0, + .dev = { .platform_data = &snddev_hdmi_stereo_rx_data }, +}; + +static struct snddev_mi2s_data snddev_mi2s_fm_tx_data = { + .capability = SNDDEV_CAP_TX , + .name = "fmradio_stereo_tx", + .copp_id = MI2S_TX, + .channel_mode = 2, /* stereo */ + .sd_lines = MI2S_SD3, /* sd3 */ + .sample_rate = 48000, +}; + +static struct platform_device msm_mi2s_fm_tx_device = { + .name = "snddev_mi2s", + .id = 0, + .dev = { .platform_data = &snddev_mi2s_fm_tx_data }, +}; + +static struct adie_codec_action_unit ifmradio_speaker_osr256_actions[] = + AUXPGA_SPEAKER_RX; + +static struct adie_codec_hwsetting_entry ifmradio_speaker_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ifmradio_speaker_osr256_actions, + .action_sz = ARRAY_SIZE(ifmradio_speaker_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ifmradio_speaker_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ifmradio_speaker_settings, + .setting_sz = 
ARRAY_SIZE(ifmradio_speaker_settings), +}; + +static struct snddev_mi2s_data snddev_mi2s_fm_rx_data = { + .capability = SNDDEV_CAP_RX , + .name = "fmradio_stereo_rx", + .copp_id = MI2S_RX, + .channel_mode = 2, /* stereo */ + .sd_lines = MI2S_SD3, /* sd3 */ + .sample_rate = 48000, +}; + +static struct platform_device msm_mi2s_fm_rx_device = { + .name = "snddev_mi2s", + .id = 1, + .dev = { .platform_data = &snddev_mi2s_fm_rx_data }, +}; + +static struct snddev_icodec_data snddev_ifmradio_speaker_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_FM), + .name = "fmradio_speaker_rx", + .copp_id = 0, + .profile = &ifmradio_speaker_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = fm_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = FM_OUT_SPEAKER, + .aic3254_voc_id = FM_OUT_SPEAKER, + .default_aic3254_id = FM_OUT_SPEAKER, +}; + +static struct platform_device msm_ifmradio_speaker_device = { + .name = "snddev_icodec", + .id = 9, + .dev = { .platform_data = &snddev_ifmradio_speaker_data }, +}; + +static struct adie_codec_action_unit ifmradio_headset_osr256_actions[] = + AUXPGA_HEADSET_AB_CPLS_RX_48000; + +static struct adie_codec_hwsetting_entry ifmradio_headset_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ifmradio_headset_osr256_actions, + .action_sz = ARRAY_SIZE(ifmradio_headset_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ifmradio_headset_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ifmradio_headset_settings, + .setting_sz = ARRAY_SIZE(ifmradio_headset_settings), +}; + +static struct snddev_icodec_data snddev_ifmradio_headset_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_FM), + .name = "fmradio_headset_rx", + .copp_id = 0, + .profile = &ifmradio_headset_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = fm_headset_enable, + .voltage_on = voltage_on, + .aic3254_id = FM_OUT_HEADSET, + .aic3254_voc_id = FM_OUT_HEADSET, + .default_aic3254_id = FM_OUT_HEADSET, +}; + +static struct platform_device msm_ifmradio_headset_device = { + .name = "snddev_icodec", + .id = 10, + .dev = { .platform_data = &snddev_ifmradio_headset_data }, +}; + +static struct adie_codec_action_unit iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_headset_mic_device = { + .name = "snddev_icodec", + .id = 33, + .dev = { .platform_data = &snddev_headset_mic_data }, +}; + +static struct adie_codec_action_unit + ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry + ihs_stereo_speaker_stereo_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 
256, + .actions = ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions, + .action_sz = + ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ihs_stereo_speaker_stereo_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ihs_stereo_speaker_stereo_rx_settings, + .setting_sz = ARRAY_SIZE(ihs_stereo_speaker_stereo_rx_settings), +}; + +static struct snddev_icodec_data snddev_ihs_stereo_speaker_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_stereo_speaker_stereo_rx", + .copp_id = 0, + .profile = &ihs_stereo_speaker_stereo_rx_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = headset_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = RING_HEADSET_SPEAKER, + .aic3254_voc_id = RING_HEADSET_SPEAKER, + .default_aic3254_id = RING_HEADSET_SPEAKER, +}; + +static struct platform_device msm_ihs_stereo_speaker_stereo_rx_device = { + .name = "snddev_icodec", + .id = 22, + .dev = { .platform_data = &snddev_ihs_stereo_speaker_stereo_rx_data }, +}; + +static struct snddev_ecodec_data snddev_bt_sco_earpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "bt_sco_rx", + .copp_id = PCM_RX, + .channel_mode = 1, +}; + +static struct snddev_ecodec_data snddev_bt_sco_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "bt_sco_tx", + .copp_id = PCM_TX, + .channel_mode = 1, +}; + +struct platform_device msm_bt_sco_earpiece_device = { + .name = "msm_snddev_ecodec", + .id = 0, + .dev = { .platform_data = &snddev_bt_sco_earpiece_data }, +}; + +struct platform_device msm_bt_sco_mic_device = { + .name = "msm_snddev_ecodec", + .id = 1, + .dev = { .platform_data = &snddev_bt_sco_mic_data }, +}; + +static struct adie_codec_action_unit itty_mono_tx_actions[] = + TTY_HEADSET_MONO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry itty_mono_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = itty_mono_tx_actions, + .action_sz = ARRAY_SIZE(itty_mono_tx_actions), + }, +}; + +static struct adie_codec_dev_profile itty_mono_tx_profile = { + .path_type = ADIE_CODEC_TX, + .settings = itty_mono_tx_settings, + .setting_sz = ARRAY_SIZE(itty_mono_tx_settings), +}; + +static struct snddev_icodec_data snddev_itty_mono_tx_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), + .name = "tty_headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &itty_mono_tx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = TTY_IN_FULL, + .aic3254_voc_id = TTY_IN_FULL, + .default_aic3254_id = TTY_IN_FULL, +}; + +static struct platform_device msm_itty_mono_tx_device = { + .name = "snddev_icodec", + .id = 16, + .dev = { .platform_data = &snddev_itty_mono_tx_data }, +}; + +static struct adie_codec_action_unit itty_mono_rx_actions[] = + TTY_HEADSET_MONO_RX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry itty_mono_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = itty_mono_rx_actions, + .action_sz = ARRAY_SIZE(itty_mono_rx_actions), + }, +}; + +static struct adie_codec_dev_profile itty_mono_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = itty_mono_rx_settings, + .setting_sz = ARRAY_SIZE(itty_mono_rx_settings), +}; + +static struct snddev_icodec_data snddev_itty_mono_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE | SNDDEV_CAP_TTY), + .name = "tty_headset_mono_rx", + .copp_id = PRIMARY_I2S_RX, + .profile = 
&itty_mono_rx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = TTY_OUT_FULL, + .aic3254_voc_id = TTY_OUT_FULL, + .default_aic3254_id = TTY_OUT_FULL, +}; + +static struct platform_device msm_itty_mono_rx_device = { + .name = "snddev_icodec", + .id = 17, + .dev = { .platform_data = &snddev_itty_mono_rx_data }, +}; + +#if 1 //HTC created device +static struct adie_codec_action_unit bmic_tx_48KHz_osr256_actions[] = + AMIC_SEC_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry bmic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = bmic_tx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(bmic_tx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile bmic_tx_profile = { + .path_type = ADIE_CODEC_TX, + .settings = bmic_tx_settings, + .setting_sz = ARRAY_SIZE(bmic_tx_settings), +}; + +static struct snddev_icodec_data snddev_bmic_tx_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "back_mic_tx", + .copp_id = 1, + .profile = &bmic_tx_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = back_mic_enable, + .aic3254_id = VOICERECORD_EMIC, + .aic3254_voc_id = VOICERECORD_EMIC, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_bmic_tx_device = { + .name = "snddev_icodec", + .id = 50, /* FIX ME */ + .dev = { .platform_data = &snddev_bmic_tx_data }, +}; + +static struct adie_codec_action_unit headset_mono_ab_cpls_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; /* FIX ME */ + +static struct adie_codec_hwsetting_entry headset_mono_ab_cpls_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = headset_mono_ab_cpls_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_mono_ab_cpls_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_mono_ab_cpls_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_mono_ab_cpls_settings, + .setting_sz = ARRAY_SIZE(headset_mono_ab_cpls_settings), +}; + +static struct snddev_icodec_data snddev_ihs_mono_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_mono_rx", + .copp_id = 0, + .profile = &headset_mono_ab_cpls_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_mono_ab_cpls_device = { + .name = "snddev_icodec", + .id = 35, /* FIX ME */ + .dev = { .platform_data = &snddev_ihs_mono_rx_data }, +}; + +static struct adie_codec_action_unit ihs_ispk_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ihs_ispk_stereo_rx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ihs_ispk_stereo_rx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ihs_ispk_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ihs_ispk_stereo_rx_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ihs_ispk_stereo_rx_settings, + .setting_sz = ARRAY_SIZE(ihs_ispk_stereo_rx_settings), +}; + +static struct snddev_icodec_data snddev_ihs_ispk_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_speaker_stereo_rx", + .copp_id = 0, + .profile = &ihs_ispk_stereo_rx_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = 
headset_speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = RING_HEADSET_SPEAKER, + .aic3254_voc_id = RING_HEADSET_SPEAKER, + .default_aic3254_id = RING_HEADSET_SPEAKER, +}; + +static struct platform_device msm_iheadset_ispeaker_rx_device = { + .name = "snddev_icodec", + .id = 36, /* FIX ME */ + .dev = { .platform_data = &snddev_ihs_ispk_stereo_rx_data }, +}; + +static struct adie_codec_action_unit idual_mic_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry idual_mic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = idual_mic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(idual_mic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile idual_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = idual_mic_settings, + .setting_sz = ARRAY_SIZE(idual_mic_settings), +}; + +static struct snddev_icodec_data snddev_idual_mic_endfire_real_stereo_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "dual_mic_stereo_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &idual_mic_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_real_stereo_tx_device = { + .name = "snddev_icodec", + .id = 26, /* FIX ME */ + .dev = { .platform_data = &snddev_idual_mic_endfire_real_stereo_data }, +}; + + + +static struct adie_codec_action_unit iusb_headset_stereo_rx_48KHz_osr256_actions[] = + SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry iusb_headset_stereo_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = iusb_headset_stereo_rx_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(iusb_headset_stereo_rx_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile iusb_headset_stereo_profile = { + .path_type = ADIE_CODEC_RX, + .settings = iusb_headset_stereo_settings, + .setting_sz = ARRAY_SIZE(iusb_headset_stereo_settings), +}; + +static struct snddev_icodec_data snddev_iusb_headset_stereo_rx_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "usb_headset_stereo_rx", + .copp_id = 0, + .profile = &iusb_headset_stereo_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = usb_headset_enable, + .voltage_on = voltage_on, + .aic3254_id = USB_AUDIO, + .aic3254_voc_id = USB_AUDIO, + .default_aic3254_id = USB_AUDIO, +}; + +static struct platform_device msm_iusb_headset_rx_device = { + .name = "snddev_icodec", + .id = 27, /* FIX ME */ + .dev = { .platform_data = &snddev_iusb_headset_stereo_rx_data }, +}; + +static struct adie_codec_action_unit ispkr_mono_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_mono_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_mono_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_mono_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_mono_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_mono_settings, + .setting_sz = ARRAY_SIZE(ispkr_mono_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mono_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_rx", + .copp_id = 0, + .profile = &ispkr_mono_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + 
.aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_mono_device = { + .name = "snddev_icodec", + .id = 28, + .dev = { .platform_data = &snddev_ispkr_mono_data }, +}; + +static struct adie_codec_action_unit camcorder_imic_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_imic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_imic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_imic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_imic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_imic_settings, + .setting_sz = ARRAY_SIZE(camcorder_imic_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_mono_tx", + .copp_id = 1, + .profile = &camcorder_imic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = int_mic_enable, + .aic3254_id = VOICERECOGNITION_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECOGNITION_IMIC, +}; + +static struct platform_device msm_camcorder_imic_device = { + .name = "snddev_icodec", + .id = 53, + .dev = { .platform_data = &snddev_camcorder_imic_data }, +}; + +static struct adie_codec_action_unit camcorder_idual_mic_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_idual_mic_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_idual_mic_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_idual_mic_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_idual_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_idual_mic_settings, + .setting_sz = ARRAY_SIZE(camcorder_idual_mic_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_stereo_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_stereo_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_idual_mic_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_camcorder_imic_stereo_device = { + .name = "snddev_icodec", + .id = 54, /* FIX ME */ + .dev = { .platform_data = &snddev_camcorder_imic_stereo_data }, +}; + +static struct adie_codec_action_unit camcorder_idual_mic_rev_48KHz_osr256_actions[] = + DUAL_MIC_STEREO_TX_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_idual_mic_rev_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_idual_mic_rev_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_idual_mic_rev_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_idual_mic_rev_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_idual_mic_rev_settings, + .setting_sz = ARRAY_SIZE(camcorder_idual_mic_rev_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_imic_stereo_rev_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_stereo_rev_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_idual_mic_rev_profile, + .channel_mode = 2, + .default_sample_rate = 48000, + .pamp_on = 
stereo_mic_enable, + .aic3254_id = VIDEORECORD_IMIC, + .aic3254_voc_id = VOICERECORD_EMIC, /* FIX ME */ + .default_aic3254_id = VIDEORECORD_IMIC, +}; + +static struct platform_device msm_camcorder_imic_stereo_rev_device = { + .name = "snddev_icodec", + .id = 55, + .dev = { .platform_data = &snddev_camcorder_imic_stereo_rev_data }, +}; + +static struct adie_codec_action_unit camcorder_iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry camcorder_iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = camcorder_iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(camcorder_iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile camcorder_iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = camcorder_iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(camcorder_iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_camcorder_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "camcorder_headset_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &camcorder_iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_camcorder_headset_mic_device = { + .name = "snddev_icodec", + .id = 56, + .dev = { .platform_data = &snddev_camcorder_headset_mic_data }, +}; + +static struct adie_codec_action_unit vr_iearpiece_48KHz_osr256_actions[] = + EAR_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry vr_iearpiece_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = vr_iearpiece_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(vr_iearpiece_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile vr_iearpiece_profile = { + .path_type = ADIE_CODEC_RX, + .settings = vr_iearpiece_settings, + .setting_sz = ARRAY_SIZE(vr_iearpiece_settings), +}; + +static struct snddev_icodec_data snddev_vr_iearpiece_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "vr_handset_mono_tx", + .copp_id = 0, + .profile = &vr_iearpiece_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = handset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_RECEIVER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_RECEIVER, + .default_aic3254_id = PLAYBACK_RECEIVER, +}; + +static struct platform_device msm_vr_iearpiece_device = { + .name = "snddev_icodec", + .id = 57, + .dev = { .platform_data = &snddev_vr_iearpiece_data }, +}; + +static struct adie_codec_action_unit vr_iheadset_mic_tx_osr256_actions[] = + HS_AMIC2_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry vr_iheadset_mic_tx_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = vr_iheadset_mic_tx_osr256_actions, + .action_sz = ARRAY_SIZE(vr_iheadset_mic_tx_osr256_actions), + } +}; + +static struct adie_codec_dev_profile vr_iheadset_mic_profile = { + .path_type = ADIE_CODEC_TX, + .settings = vr_iheadset_mic_tx_settings, + .setting_sz = ARRAY_SIZE(vr_iheadset_mic_tx_settings), +}; + +static struct snddev_icodec_data snddev_vr_headset_mic_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "vr_headset_mono_tx", + .copp_id = PRIMARY_I2S_TX, + .profile = &vr_iheadset_mic_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = ext_mic_enable, + .aic3254_id = 
VOICERECOGNITION_EMIC, + .aic3254_voc_id = CALL_UPLINK_EMIC_HEADSET, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_vr_headset_mic_device = { + .name = "snddev_icodec", + .id = 58, + .dev = { .platform_data = &snddev_vr_headset_mic_data }, +}; + +static struct adie_codec_action_unit ispkr_mono_alt_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_mono_alt_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = ispkr_mono_alt_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_mono_alt_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_mono_alt_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_mono_alt_settings, + .setting_sz = ARRAY_SIZE(ispkr_mono_alt_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_mono_alt_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "speaker_mono_alt_rx", + .copp_id = 0, + .profile = &ispkr_mono_alt_profile, + .channel_mode = 1, + .default_sample_rate = 48000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_mono_alt_device = { + .name = "snddev_icodec", + .id = 59, + .dev = { .platform_data = &snddev_ispkr_mono_alt_data }, +}; + + +static struct adie_codec_action_unit imic_note_48KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry imic_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = imic_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(imic_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile imic_note_profile = { + .path_type = ADIE_CODEC_TX, + .settings = imic_note_settings, + .setting_sz = ARRAY_SIZE(imic_note_settings), +}; + +static struct snddev_icodec_data snddev_imic_note_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "imic_note_tx", + .copp_id = 1, + .profile = &imic_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = stereo_mic_enable, + .aic3254_id = VOICERECORD_IMIC, + .aic3254_voc_id = CALL_UPLINK_IMIC_RECEIVER, + .default_aic3254_id = VOICERECORD_IMIC, +}; + +static struct platform_device msm_imic_note_device = { + .name = "snddev_icodec", + .id = 60, + .dev = { .platform_data = &snddev_imic_note_data }, +}; + +static struct adie_codec_action_unit ispkr_note_48KHz_osr256_actions[] = + SPEAKER_PRI_48000_OSR_256; + +static struct adie_codec_hwsetting_entry ispkr_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = ispkr_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(ispkr_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile ispkr_note_profile = { + .path_type = ADIE_CODEC_RX, + .settings = ispkr_note_settings, + .setting_sz = ARRAY_SIZE(ispkr_note_settings), +}; + +static struct snddev_icodec_data snddev_ispkr_note_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "ispkr_note_rx", + .copp_id = 0, + .profile = &ispkr_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = speaker_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_SPEAKER, + .aic3254_voc_id = CALL_DOWNLINK_IMIC_SPEAKER, + .default_aic3254_id = PLAYBACK_SPEAKER, +}; + +static struct platform_device msm_ispkr_note_device = { + .name = "snddev_icodec", + .id = 61, + .dev = { .platform_data 
= &snddev_ispkr_note_data }, +}; + +static struct adie_codec_action_unit emic_note_16KHz_osr256_actions[] = + AMIC_PRI_MONO_48000_OSR_256; + +static struct adie_codec_hwsetting_entry emic_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = emic_note_16KHz_osr256_actions, + .action_sz = ARRAY_SIZE(emic_note_16KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile emic_note_profile = { + .path_type = ADIE_CODEC_TX, + .settings = emic_note_settings, + .setting_sz = ARRAY_SIZE(emic_note_settings), +}; + +static struct snddev_icodec_data snddev_emic_note_data = { + .capability = (SNDDEV_CAP_TX | SNDDEV_CAP_VOICE), + .name = "emic_note_tx", + .copp_id = 1, + .profile = &emic_note_profile, + .channel_mode = 1, + .default_sample_rate = 16000, + .pamp_on = ext_mic_enable, + .aic3254_id = VOICERECORD_EMIC, + .aic3254_voc_id = VOICERECORD_EMIC, + .default_aic3254_id = VOICERECORD_EMIC, +}; + +static struct platform_device msm_emic_note_device = { + .name = "snddev_icodec", + .id = 62, + .dev = { .platform_data = &snddev_emic_note_data }, +}; + +static struct adie_codec_action_unit headset_note_48KHz_osr256_actions[] = + HEADSET_AB_CPLS_48000_OSR_256; + +static struct adie_codec_hwsetting_entry headset_note_settings[] = { + { + .freq_plan = 16000, + .osr = 256, + .actions = headset_note_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE(headset_note_48KHz_osr256_actions), + } +}; + +static struct adie_codec_dev_profile headset_note_profile = { + .path_type = ADIE_CODEC_RX, + .settings = headset_note_settings, + .setting_sz = ARRAY_SIZE(headset_note_settings), +}; + +static struct snddev_icodec_data snddev_ihs_note_data = { + .capability = (SNDDEV_CAP_RX | SNDDEV_CAP_VOICE), + .name = "headset_note_rx", + .copp_id = 0, + .profile = &headset_note_profile, + .channel_mode = 2, + .default_sample_rate = 16000, + .pamp_on = headset_enable, + .voltage_on = voltage_on, + .aic3254_id = PLAYBACK_HEADSET, + .aic3254_voc_id = CALL_DOWNLINK_EMIC_HEADSET, + .default_aic3254_id = PLAYBACK_HEADSET, +}; + +static struct platform_device msm_headset_note_device = { + .name = "snddev_icodec", + .id = 63, + .dev = { .platform_data = &snddev_ihs_note_data }, +}; + + + +#endif + +#ifdef CONFIG_DEBUG_FS +static struct adie_codec_action_unit + ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions[] = + HPH_PRI_D_LEG_STEREO; + +static struct adie_codec_hwsetting_entry + ihs_stereo_rx_class_d_legacy_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = + ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE + (ihs_stereo_rx_class_d_legacy_48KHz_osr256_actions), + } +}; + +static struct adie_codec_action_unit + ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions[] = + HPH_PRI_AB_LEG_STEREO; + +static struct adie_codec_hwsetting_entry + ihs_stereo_rx_class_ab_legacy_settings[] = { + { + .freq_plan = 48000, + .osr = 256, + .actions = + ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions, + .action_sz = ARRAY_SIZE + (ihs_stereo_rx_class_ab_legacy_48KHz_osr256_actions), + } +}; + +static void snddev_hsed_config_modify_setting(int type) +{ + struct platform_device *device; + struct snddev_icodec_data *icodec_data; + + device = &msm_headset_stereo_device; + icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; + + if (icodec_data) { + if (type == 1) { + icodec_data->voltage_on = NULL; + icodec_data->profile->settings = + ihs_stereo_rx_class_d_legacy_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(ihs_stereo_rx_class_d_legacy_settings); + } else 
if (type == 2) { + icodec_data->voltage_on = NULL; + icodec_data->profile->settings = + ihs_stereo_rx_class_ab_legacy_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(ihs_stereo_rx_class_ab_legacy_settings); + } + } +} + +static void snddev_hsed_config_restore_setting(void) +{ + struct platform_device *device; + struct snddev_icodec_data *icodec_data; + + device = &msm_headset_stereo_device; + icodec_data = (struct snddev_icodec_data *)device->dev.platform_data; + + if (icodec_data) { + icodec_data->voltage_on = voltage_on; + icodec_data->profile->settings = headset_ab_cpls_settings; + icodec_data->profile->setting_sz = + ARRAY_SIZE(headset_ab_cpls_settings); + } +} + +static ssize_t snddev_hsed_config_debug_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char *lb_str = filp->private_data; + char cmd; + + if (get_user(cmd, ubuf)) + return -EFAULT; + + if (!strcmp(lb_str, "msm_hsed_config")) { + switch (cmd) { + case '0': + snddev_hsed_config_restore_setting(); + break; + + case '1': + snddev_hsed_config_modify_setting(1); + break; + + case '2': + snddev_hsed_config_modify_setting(2); + break; + + default: + break; + } + } + return cnt; +} + +static int snddev_hsed_config_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations snddev_hsed_config_debug_fops = { + .open = snddev_hsed_config_debug_open, + .write = snddev_hsed_config_debug_write +}; +#endif + +static struct snddev_virtual_data snddev_uplink_rx_data = { + .capability = SNDDEV_CAP_RX, + .name = "uplink_rx", + .copp_id = VOICE_PLAYBACK_TX, +}; + +static struct platform_device msm_uplink_rx_device = { + .name = "snddev_virtual", + .dev = { .platform_data = &snddev_uplink_rx_data }, +}; + +static struct platform_device *snd_devices_common[] __initdata = { + &msm_uplink_rx_device, +}; + +static struct platform_device *snd_devices_surf[] __initdata = { + &msm_iearpiece_device, + &msm_imic_device, + &msm_ispkr_stereo_device, + &msm_snddev_hdmi_stereo_rx_device, + &msm_headset_mic_device, + &msm_ispkr_mic_device, + &msm_bt_sco_earpiece_device, + &msm_bt_sco_mic_device, + &msm_headset_stereo_device, + &msm_itty_mono_tx_device, + &msm_itty_mono_rx_device, + &msm_mi2s_fm_tx_device, + &msm_mi2s_fm_rx_device, + &msm_ifmradio_speaker_device, + &msm_ifmradio_headset_device, + &msm_hs_dual_mic_endfire_device, + &msm_spkr_dual_mic_endfire_device, + &msm_hs_dual_mic_broadside_device, + &msm_spkr_dual_mic_broadside_device, + &msm_ihs_stereo_speaker_stereo_rx_device, + &msm_headset_mono_ab_cpls_device, + &msm_iheadset_ispeaker_rx_device, + &msm_bmic_tx_device, + &msm_anc_headset_device, + &msm_real_stereo_tx_device, + &msm_ihac_device, + &msm_nomic_headset_tx_device, + &msm_nomic_headset_stereo_device, + &msm_iusb_headset_rx_device, + &msm_ispkr_mono_device, + &msm_camcorder_imic_device, + &msm_camcorder_imic_stereo_device, + &msm_camcorder_imic_stereo_rev_device, + &msm_camcorder_headset_mic_device, + &msm_vr_iearpiece_device, + &msm_vr_headset_mic_device, + &msm_ispkr_mono_alt_device, + &msm_imic_note_device, + &msm_ispkr_note_device, + &msm_emic_note_device, + &msm_headset_note_device, +}; + +void htc_8x60_register_analog_ops(struct q6v2audio_analog_ops *ops) +{ + audio_ops = ops; +} + +void __init msm_snddev_init(void) +{ + int rc, i; + int dev_id; + + platform_add_devices(snd_devices_surf, ARRAY_SIZE(snd_devices_surf)); +#ifdef CONFIG_DEBUG_FS + debugfs_hsed_config = debugfs_create_file("msm_hsed_config", + 
S_IFREG | S_IRUGO, NULL, + (void *) "msm_hsed_config", &snddev_hsed_config_debug_fops); +#endif + + for (i = 0, dev_id = 0; i < ARRAY_SIZE(snd_devices_common); i++) + snd_devices_common[i]->id = dev_id++; + + platform_add_devices(snd_devices_common, + ARRAY_SIZE(snd_devices_common)); + + rc = gpio_request(SNDDEV_GPIO_CLASS_D1_EN, "CLASSD1_EN"); + if (rc) { + pr_aud_err("%s: spkr pamp gpio %d request" + "failed\n", __func__, SNDDEV_GPIO_CLASS_D1_EN); + } else { + gpio_direction_output(SNDDEV_GPIO_CLASS_D1_EN, 0); + gpio_free(SNDDEV_GPIO_CLASS_D1_EN); + } +} diff --git a/arch/arm/mach-msm/qdsp6v3/dsp_debug.c b/arch/arm/mach-msm/qdsp6v3/dsp_debug.c new file mode 100644 index 00000000000..8bf7fee3830 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/dsp_debug.c @@ -0,0 +1,202 @@ +/* arch/arm/mach-msm/qdsp6/dsp_dump.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../proc_comm.h" +#include +#include "dsp_debug.h" + +static wait_queue_head_t dsp_wait; +static int dsp_has_crashed; +static int dsp_wait_count; + +static atomic_t dsp_crash_count = ATOMIC_INIT(0); +dsp_state_cb cb_ptr; + +void q6audio_dsp_not_responding(void) +{ + if (cb_ptr) + cb_ptr(DSP_STATE_CRASHED); + if (atomic_add_return(1, &dsp_crash_count) != 1) { + pr_aud_err("q6audio_dsp_not_responding() \ + - parking additional crasher...\n"); + for (;;) + msleep(1000); + } + if (dsp_wait_count) { + dsp_has_crashed = 1; + wake_up(&dsp_wait); + + while (dsp_has_crashed != 2) + wait_event(dsp_wait, dsp_has_crashed == 2); + } else { + pr_aud_err("q6audio_dsp_not_responding() - no waiter?\n"); + } + if (cb_ptr) + cb_ptr(DSP_STATE_CRASH_DUMP_DONE); + + BUG(); +} + +static int dsp_open(struct inode *inode, struct file *file) +{ + return 0; +} + +#define DSP_NMI_ADDR 0x28800010 + +static ssize_t dsp_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + char cmd[32]; + void __iomem *ptr; + + if (count >= sizeof(cmd)) + return -EINVAL; + if (copy_from_user(cmd, buf, count)) + return -EFAULT; + cmd[count] = 0; + + if ((count > 1) && (cmd[count-1] == '\n')) + cmd[count-1] = 0; + + if (!strcmp(cmd, "wait-for-crash")) { + while (!dsp_has_crashed) { + int res; + dsp_wait_count++; + res = wait_event_interruptible(dsp_wait, + dsp_has_crashed); + if (res < 0) { + dsp_wait_count--; + return res; + } + } + /* assert DSP NMI */ + ptr = ioremap(DSP_NMI_ADDR, 0x16); + if (!ptr) { + pr_aud_err("Unable to map DSP NMI\n"); + return -EFAULT; + } + writel(0x1, (void *)ptr); + iounmap(ptr); + } else if (!strcmp(cmd, "boom")) { + q6audio_dsp_not_responding(); + } else if (!strcmp(cmd, "continue-crash")) { + dsp_has_crashed = 2; + wake_up(&dsp_wait); + } else { + pr_aud_err("[%s:%s] unknown dsp_debug command: %s\n", __MM_FILE__, + __func__, cmd); + } + + return count; +} + +#define DSP_RAM_BASE 0x46700000 +#define DSP_RAM_SIZE 0x2000000 + +static unsigned copy_ok_count; + +static ssize_t dsp_read(struct file *file, char __user *buf, + size_t count, loff_t 
*pos) +{ + size_t actual = 0; + size_t mapsize = PAGE_SIZE; + unsigned addr; + void __iomem *ptr; + + if (*pos >= DSP_RAM_SIZE) + return 0; + + if (*pos & (PAGE_SIZE - 1)) + return -EINVAL; + + addr = (*pos + DSP_RAM_BASE); + + /* don't blow up if we're unaligned */ + if (addr & (PAGE_SIZE - 1)) + mapsize *= 2; + + while (count >= PAGE_SIZE) { + ptr = ioremap(addr, mapsize); + if (!ptr) { + pr_aud_err("[%s:%s] map error @ %x\n", __MM_FILE__, + __func__, addr); + return -EFAULT; + } + if (copy_to_user(buf, ptr, PAGE_SIZE)) { + iounmap(ptr); + pr_aud_err("[%s:%s] copy error @ %p\n", __MM_FILE__, + __func__, buf); + return -EFAULT; + } + copy_ok_count += PAGE_SIZE; + iounmap(ptr); + addr += PAGE_SIZE; + buf += PAGE_SIZE; + actual += PAGE_SIZE; + count -= PAGE_SIZE; + } + + *pos += actual; + return actual; +} + +static int dsp_release(struct inode *inode, struct file *file) +{ + return 0; +} + +int dsp_debug_register(dsp_state_cb ptr) +{ + if (ptr == NULL) + return -EINVAL; + cb_ptr = ptr; + + return 0; +} + +static const struct file_operations dsp_fops = { + .owner = THIS_MODULE, + .open = dsp_open, + .read = dsp_read, + .write = dsp_write, + .release = dsp_release, +}; + +static struct miscdevice dsp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "dsp_debug", + .fops = &dsp_fops, +}; + + +static int __init dsp_init(void) +{ + init_waitqueue_head(&dsp_wait); + return misc_register(&dsp_misc); +} + +device_initcall(dsp_init); diff --git a/arch/arm/mach-msm/qdsp6v3/dsp_debug.h b/arch/arm/mach-msm/qdsp6v3/dsp_debug.h new file mode 100644 index 00000000000..1a73dd42395 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/dsp_debug.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __DSP_DEBUG_H_ +#define __DSP_DEBUG_H_ + +typedef int (*dsp_state_cb)(int state); +int dsp_debug_register(dsp_state_cb ptr); + +#define DSP_STATE_CRASHED 0x0 +#define DSP_STATE_CRASH_DUMP_DONE 0x1 + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/evrc_in.c b/arch/arm/mach-msm/qdsp6v3/evrc_in.c new file mode 100644 index 00000000000..5d4463583ae --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/evrc_in.c @@ -0,0 +1,337 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((23+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_evrc_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +/* ------------------- device --------------------- */ +static long evrc_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_evrc(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0); + + if (rc < 0) { + 
pr_aud_err("%s:session id %d: cmd evrc media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_EVRC_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_evrc_enc_config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_EVRC_ENC_CONFIG: { + struct msm_audio_evrc_enc_config cfg; + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_evrc_enc_config))) { + rc = -EFAULT; + break; + } + + if (cfg.min_bit_rate > 4 || + cfg.min_bit_rate < 1 || + (cfg.min_bit_rate == 2)) { + pr_aud_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg.max_bit_rate > 4 || + cfg.max_bit_rate < 1 || + (cfg.max_bit_rate == 2)) { + pr_aud_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->min_bit_rate = cfg.min_bit_rate; + enc_cfg->max_bit_rate = cfg.max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x\ + max_bit_rate=0x%x\n", __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int evrc_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_evrc_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for evrc\ + driver\n", __func__, audio->ac->session); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_evrc_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s:session id %d: Could not allocate memory for aac\ + config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 23; + 
audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_evrc_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s:session id %d: Could not allocate memory for audio\ + client\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open evrc encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_EVRC, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_EVRC); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = evrc_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = evrc_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_evrc_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_evrc_in", + .fops = &audio_in_fops, +}; + +static int __init evrc_in_init(void) +{ + return misc_register(&audio_evrc_in_misc); +} + +device_initcall(evrc_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/fm.c b/arch/arm/mach-msm/qdsp6v3/fm.c new file mode 100644 index 00000000000..8bbd4d558bc --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/fm.c @@ -0,0 +1,259 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5v2/audio_mp3.c + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * + * All source code in this file is licensed under the following license except + * where indicated. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * See the GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * along with this program; if not, you can find it at http://www.fsf.org + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SESSION_ID_FM (MAX_SESSIONS + 1) +#define FM_ENABLE 0x1 +#define FM_DISABLE 0x0 +#define FM_COPP 0x7 + +struct audio { + struct mutex lock; + + int opened; + int enabled; + int running; + + uint16_t fm_source; + uint16_t fm_src_copp_id; + uint16_t fm_dest; + uint16_t fm_dst_copp_id; + uint16_t dec_id; + uint32_t device_events; + uint16_t volume; +}; + +static struct audio fm_audio; +static int fm_audio_enable(struct audio *audio) +{ + if (audio->enabled) + return 0; + + pr_aud_info("%s: fm dest= %08x fm_source = %08x\n", __func__, + audio->fm_dst_copp_id, audio->fm_src_copp_id); + + /* do afe loopback here */ + + if (audio->fm_dest && audio->fm_source) { + if (afe_loopback(FM_ENABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id) < 0) { + pr_aud_err("%s: afe_loopback failed\n", __func__); + } + + audio->running = 1; + } + + audio->enabled = 1; + return 0; +} + +static void fm_audio_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct audio *audio = (struct audio *) private_data; + switch (evt_id) { + case AUDDEV_EVT_DEV_RDY: + pr_aud_info("%s :AUDDEV_EVT_DEV_RDY\n", __func__); + if (evt_payload->routing_id == FM_COPP) { + audio->fm_source = 1; + audio->fm_src_copp_id = FM_COPP; + } else { + audio->fm_dest = 1; + audio->fm_dst_copp_id = evt_payload->routing_id; + } + + if (audio->enabled && + audio->fm_dest && + audio->fm_source && !audio->running) { + + afe_loopback(FM_ENABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id); + audio->running = 1; + } + break; + case AUDDEV_EVT_DEV_RLS: + pr_aud_info("%s: AUDDEV_EVT_DEV_RLS\n", __func__); + if (evt_payload->routing_id == audio->fm_src_copp_id) + audio->fm_source = 0; + else + audio->fm_dest = 0; + if (audio->running + && (!audio->fm_dest && !audio->fm_source)) { + afe_loopback(FM_DISABLE, audio->fm_dst_copp_id, + audio->fm_src_copp_id); + audio->running = 0; + } else { + pr_aud_err("%s: device switch happened\n", __func__); + } + break; + case AUDDEV_EVT_STREAM_VOL_CHG: + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG\n", __func__); + if (audio->fm_source) { + audio->volume = evt_payload->session_vol; + afe_loopback_gain(audio->fm_src_copp_id, + audio->volume); + } + break; + + default: + pr_aud_err("%s: ERROR:wrong event %08x\n", __func__, evt_id); + break; + } +} + +static int fm_audio_disable(struct audio *audio) +{ + + /* break the AFE loopback here */ + afe_loopback(FM_DISABLE, audio->fm_dst_copp_id, audio->fm_src_copp_id); + return 0; +} + +static long fm_audio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct audio *audio = file->private_data; + int rc = -EINVAL; + + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_START: + pr_aud_info("%s: AUDIO_START\n", __func__); + rc = fm_audio_enable(audio); + break; + case AUDIO_STOP: + pr_aud_info("%s: AUDIO_STOP\n", __func__); + rc = fm_audio_disable(audio); + audio->running = 0; + audio->enabled = 0; + break; + case AUDIO_GET_SESSION_ID: + if 
(copy_to_user((void *) arg, &audio->dec_id, + sizeof(unsigned short))) + rc = -EFAULT; + else + rc = 0; + break; + default: + rc = -EINVAL; + pr_aud_err("%s: Un supported IOCTL\n", __func__); + } + mutex_unlock(&audio->lock); + return rc; +} + +static int fm_audio_release(struct inode *inode, struct file *file) +{ + struct audio *audio = file->private_data; + + pr_debug("audio instance 0x%08x freeing\n", (int)audio); + mutex_lock(&audio->lock); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id); + fm_audio_disable(audio); + audio->running = 0; + audio->enabled = 0; + audio->opened = 0; + mutex_unlock(&audio->lock); + return 0; +} + +static int fm_audio_open(struct inode *inode, struct file *file) +{ + struct audio *audio = &fm_audio; + int rc = 0; + + + if (audio->opened) + return -EPERM; + + /* Allocate the decoder */ + audio->dec_id = SESSION_ID_FM; + + audio->running = 0; + audio->fm_source = 0; + audio->fm_dest = 0; + + audio->device_events = AUDDEV_EVT_DEV_RDY + |AUDDEV_EVT_DEV_RLS| + AUDDEV_EVT_STREAM_VOL_CHG; + + rc = auddev_register_evt_listner(audio->device_events, + AUDDEV_CLNT_DEC, + audio->dec_id, + fm_audio_listner, + (void *)audio); + + if (rc) { + pr_aud_err("%s: failed to register listnet\n", __func__); + goto event_err; + } + + audio->opened = 1; + file->private_data = audio; + +event_err: + return rc; +} + +static const struct file_operations audio_fm_fops = { + .owner = THIS_MODULE, + .open = fm_audio_open, + .release = fm_audio_release, + .unlocked_ioctl = fm_audio_ioctl, +}; + +struct miscdevice audio_fm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_fm", + .fops = &audio_fm_fops, +}; + +static int __init fm_audio_init(void) +{ + struct audio *audio = &fm_audio; + + mutex_init(&audio->lock); + return misc_register(&audio_fm_misc); +} + +device_initcall(fm_audio_init); + +MODULE_DESCRIPTION("MSM FM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/pcm_in.c b/arch/arm/mach-msm/qdsp6v3/pcm_in.c new file mode 100644 index 00000000000..7282e2de982 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/pcm_in.c @@ -0,0 +1,504 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (C) 2009 HTC Corporation + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_BUF 4 +#define BUFSZ (480 * 8) +#define BUFFER_SIZE_MULTIPLE 4 +#define MIN_BUFFER_SIZE 160 + +#define VOC_REC_NONE 0xFF + +struct pcm { + struct mutex lock; + struct mutex read_lock; + wait_queue_head_t wait; + spinlock_t dsp_lock; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t rec_mode; + uint32_t in_frame_info[MAX_BUF][2]; + atomic_t in_count; + atomic_t in_enabled; + atomic_t in_opened; + atomic_t in_stopped; +}; + +static atomic_t pcm_opened = ATOMIC_INIT(0); + +static void pcm_in_get_dsp_buffers(struct pcm*, + uint32_t token, uint32_t *payload); + +void pcm_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct pcm *pcm = (struct pcm *) priv; + unsigned long flags; + + spin_lock_irqsave(&pcm->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + pcm_in_get_dsp_buffers(pcm, token, payload); + break; + default: + break; + } + spin_unlock_irqrestore(&pcm->dsp_lock, flags); +} + +static void pcm_in_get_dsp_buffers(struct pcm *pcm, + uint32_t token, uint32_t *payload) +{ + pcm->in_frame_info[token][0] = payload[7]; + pcm->in_frame_info[token][1] = payload[3]; + if (atomic_read(&pcm->in_count) <= pcm->buffer_count) + atomic_inc(&pcm->in_count); + wake_up(&pcm->wait); +} + +static int pcm_in_enable(struct pcm *pcm) +{ + if (atomic_read(&pcm->in_enabled)) + return 0; + return q6asm_run(pcm->ac, 0, 0, 0); +} + +static int pcm_in_disable(struct pcm *pcm) +{ + int rc = 0; + + if (atomic_read(&pcm->in_opened)) { + atomic_set(&pcm->in_enabled, 0); + atomic_set(&pcm->in_opened, 0); + rc = q6asm_cmd(pcm->ac, CMD_CLOSE); + + atomic_set(&pcm->in_stopped, 1); + memset(pcm->in_frame_info, 0, + sizeof(char) * pcm->buffer_count * 2); + wake_up(&pcm->wait); + } + return rc; +} + +static int config(struct pcm *pcm) +{ + int rc = 0; + + pr_debug("%s: pcm prefill, buffer_size = %d\n", __func__, + pcm->buffer_size); + rc = q6asm_audio_client_buf_alloc(OUT, pcm->ac, + pcm->buffer_size, pcm->buffer_count); + if (rc < 0) { + pr_aud_err("Audio Start: Buffer Allocation failed \ + rc = %d\n", rc); + goto fail; + } + + rc = q6asm_enc_cfg_blk_pcm(pcm->ac, pcm->sample_rate, + pcm->channel_count); + if (rc < 0) { + pr_aud_err("%s: cmd media format block failed", __func__); + goto fail; + } +fail: + return rc; +} + +static long pcm_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + mutex_lock(&pcm->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: + break; + case AUDIO_GET_STATS: { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + rc = -EFAULT; + break; + } + case AUDIO_START: { + int cnt = 0; + if (atomic_read(&pcm->in_enabled)) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = config(pcm); + if (rc) { + pr_aud_err("%s: IN Configuration failed\n", __func__); + rc = -EFAULT; + break; + } + + rc = pcm_in_enable(pcm); + if (rc) { + pr_aud_err("%s: In Enable failed\n", __func__); + rc = -EFAULT; + break; + } + + atomic_set(&pcm->in_enabled, 1); + + while (cnt++ < pcm->buffer_count) + q6asm_read(pcm->ac); + pr_aud_info("%s: AUDIO_START session id[%d]\n", __func__, + pcm->ac->session); + + if (pcm->rec_mode != 
VOC_REC_NONE) + msm_enable_incall_recording(pcm->ac->session, + pcm->rec_mode, pcm->sample_rate, pcm->channel_count); + + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &pcm->ac->session, + sizeof(unsigned short))) + rc = -EFAULT; + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + pr_debug("%s: SET_CONFIG: buffer_size:%d channel_count:%d" + "sample_rate:%d, buffer_count:%d\n", __func__, + config.buffer_size, config.channel_count, + config.sample_rate, config.buffer_count); + + if (!config.channel_count || config.channel_count > 2) { + rc = -EINVAL; + break; + } + + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + + if ((config.buffer_size % (config.channel_count * + BUFFER_SIZE_MULTIPLE)) || + (config.buffer_size < MIN_BUFFER_SIZE)) { + pr_aud_err("%s: Buffer Size should be multiple of " + "[4 * no. of channels] and greater than 160\n", + __func__); + rc = -EINVAL; + break; + } + + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + pcm->buffer_count = config.buffer_count; + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + config.buffer_size = pcm->buffer_size; + config.buffer_count = pcm->buffer_count; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + break; + } + case AUDIO_ENABLE_AUDPRE: { + + uint16_t enable_mask; + + if (copy_from_user(&enable_mask, (void *) arg, + sizeof(enable_mask))) { + rc = -EFAULT; + break; + } + if (enable_mask & FLUENCE_ENABLE) + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + VPM_TX_DM_FLUENCE_COPP_TOPOLOGY); + else if (enable_mask & STEREO_RECORD_ENABLE) + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + HTC_STEREO_RECORD_TOPOLOGY); + else + rc = auddev_cfg_tx_copp_topology(pcm->ac->session, + DEFAULT_COPP_TOPOLOGY); + break; + } + case AUDIO_SET_INCALL: { + if (copy_from_user(&pcm->rec_mode, + (void *) arg, + sizeof(pcm->rec_mode))) { + rc = -EFAULT; + pr_aud_err("%s: Error copying in-call mode\n", __func__); + break; + } + + if (pcm->rec_mode != VOC_REC_UPLINK && + pcm->rec_mode != VOC_REC_DOWNLINK && + pcm->rec_mode != VOC_REC_BOTH) { + rc = -EINVAL; + pcm->rec_mode = VOC_REC_NONE; + + pr_aud_err("%s: Invalid %d in-call rec_mode\n", + __func__, pcm->rec_mode); + break; + } + + pr_debug("%s: In-call rec_mode %d\n", __func__, pcm->rec_mode); + break; + } + + + default: + rc = -EINVAL; + break; + } + mutex_unlock(&pcm->lock); + return rc; +} + +static int pcm_in_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + int rc = 0; + struct timespec ts; + struct rtc_time tm; + + if (atomic_cmpxchg(&pcm_opened, 0, 1) != 0) { + rc = -EBUSY; + return rc; + } + + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + if (!pcm) + return -ENOMEM; + + pcm->channel_count = 1; + pcm->sample_rate = 8000; + pcm->buffer_size = BUFSZ; + pcm->buffer_count = MAX_BUF; + + pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_in_cb, (void *)pcm); + if (!pcm->ac) { + pr_aud_err("%s: Could not allocate memory\n", __func__); + rc = -ENOMEM; + goto fail; + } + + mutex_init(&pcm->lock); + mutex_init(&pcm->read_lock); + spin_lock_init(&pcm->dsp_lock); 
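+	/*
+	 * The defaults chosen above (mono, 8 kHz, MAX_BUF buffers of BUFSZ
+	 * bytes) apply only until userspace issues AUDIO_SET_CONFIG.
+	 *
+	 * Illustrative capture sequence from userspace -- a sketch only,
+	 * assuming the AUDIO_* ioctls and struct msm_audio_config from the
+	 * msm_audio UAPI header, and that the misc device node appears as
+	 * /dev/msm_pcm_in (the driver permits a single open at a time):
+	 *
+	 *	int fd = open("/dev/msm_pcm_in", O_RDONLY);
+	 *	struct msm_audio_config cfg;
+	 *
+	 *	ioctl(fd, AUDIO_GET_CONFIG, &cfg);
+	 *	cfg.sample_rate   = 16000;   // accepted range: 8000..48000
+	 *	cfg.channel_count = 1;       // 1 or 2
+	 *	cfg.buffer_size   = 2048;    // multiple of 4*channels, >= 160
+	 *	ioctl(fd, AUDIO_SET_CONFIG, &cfg);
+	 *	ioctl(fd, AUDIO_START, 0);   // allocates buffers, starts session
+	 *	while (read(fd, pcm_buf, cfg.buffer_size) > 0)
+	 *		;                    // consume captured PCM frames
+	 *	close(fd);                   // release tears the session down
+	 */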
+ init_waitqueue_head(&pcm->wait); + + rc = q6asm_open_read(pcm->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s: Cmd Open Failed\n", __func__); + goto fail; + } + + atomic_set(&pcm->in_stopped, 0); + atomic_set(&pcm->in_enabled, 0); + atomic_set(&pcm->in_count, 0); + atomic_set(&pcm->in_opened, 1); + + pcm->rec_mode = VOC_REC_NONE; + + file->private_data = pcm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][start_recording][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + pr_aud_info("%s: pcm in open session id[%d]\n", __func__, pcm->ac->session); + + return 0; +fail: + if (pcm->ac) + q6asm_audio_client_free(pcm->ac); + kfree(pcm); + atomic_set(&pcm_opened, 0); + return rc; +} + +static ssize_t pcm_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + const char __user *start = buf; + void *data; + uint32_t offset = 0; + uint32_t size = 0; + uint32_t idx; + int rc = 0; + int len = 0; + + if (!atomic_read(&pcm->in_enabled)) + return -EFAULT; + mutex_lock(&pcm->read_lock); + while (count > 0) { + rc = wait_event_timeout(pcm->wait, + (atomic_read(&pcm->in_count) || + atomic_read(&pcm->in_stopped)), 5 * HZ); + if (!rc) { + pr_aud_err("%s: wait_event_timeout failed\n", __func__); + goto fail; + } + + if (atomic_read(&pcm->in_stopped) && + !atomic_read(&pcm->in_count)) { + mutex_unlock(&pcm->read_lock); + return 0; + } + + data = q6asm_is_cpu_buf_avail(OUT, pcm->ac, &size, &idx); + if (count >= size) + len = size; + else { + len = count; + pr_aud_err("%s: short read data[%p]bytesavail[%d]" + "bytesrequest[%d]" + "bytesrejected%d]\n",\ + __func__, data, size, + count, (size - count)); + } + if ((len) && data) { + offset = pcm->in_frame_info[idx][1]; + if (copy_to_user(buf, data+offset, len)) { + pr_aud_err("%s copy_to_user failed len[%d]\n", + __func__, len); + rc = -EFAULT; + goto fail; + } + count -= len; + buf += len; + } + atomic_dec(&pcm->in_count); + memset(&pcm->in_frame_info[idx], 0, + sizeof(uint32_t) * 2); + + rc = q6asm_read(pcm->ac); + if (rc < 0) { + pr_aud_err("%s q6asm_read faile\n", __func__); + goto fail; + } + rmb(); + break; + } + rc = buf-start; +fail: + mutex_unlock(&pcm->read_lock); + return rc; +} + +static int pcm_in_release(struct inode *inode, struct file *file) +{ + int rc = 0; + struct timespec ts; + struct rtc_time tm; + struct pcm *pcm = file->private_data; + + if (pcm == NULL) { + pr_aud_err("%s: Nothing need to be released.\n", __func__); + return 0; + } + if (pcm->ac) { + mutex_lock(&pcm->lock); + + if ((pcm->rec_mode != VOC_REC_NONE) && atomic_read(&pcm->in_enabled)) { + msm_disable_incall_recording(pcm->ac->session, pcm->rec_mode); + + pcm->rec_mode = VOC_REC_NONE; + } + + + /* remove this session from topology list */ + auddev_cfg_tx_copp_topology(pcm->ac->session, + DEFAULT_COPP_TOPOLOGY); + mutex_unlock(&pcm->lock); + } + + rc = pcm_in_disable(pcm); + if (pcm->ac) { + pr_aud_info("[%s:%s] release session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + msm_clear_session_id(pcm->ac->session); + q6asm_audio_client_free(pcm->ac); + } + + kfree(pcm); + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + atomic_set(&pcm_opened, 0); + pr_aud_info1("[ATS][stop_recording][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, 
tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + return rc; +} + +static const struct file_operations pcm_in_fops = { + .owner = THIS_MODULE, + .open = pcm_in_open, + .read = pcm_in_read, + .release = pcm_in_release, + .unlocked_ioctl = pcm_in_ioctl, +}; + +struct miscdevice pcm_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_in", + .fops = &pcm_in_fops, +}; + +static int __init pcm_in_init(void) +{ + return misc_register(&pcm_in_misc); +} + +device_initcall(pcm_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/pcm_out.c b/arch/arm/mach-msm/qdsp6v3/pcm_out.c new file mode 100644 index 00000000000..4ba5c287e4a --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/pcm_out.c @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2009 Google, Inc. + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_BUF 2 +#define BUFSZ (4800) + +struct pcm { + struct mutex lock; + struct mutex write_lock; + spinlock_t dsp_lock; + wait_queue_head_t write_wait; + struct audio_client *ac; + uint32_t sample_rate; + uint32_t channel_count; + uint32_t buffer_size; + uint32_t buffer_count; + uint32_t rec_mode; + uint32_t stream_event; + uint32_t volume; + atomic_t out_count; + atomic_t out_enabled; + atomic_t out_opened; + atomic_t out_stopped; + atomic_t out_prefill; + struct wake_lock wakelock; +}; + +void pcm_out_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct pcm *pcm = (struct pcm *) priv; + unsigned long flags; + + spin_lock_irqsave(&pcm->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&pcm->out_count); + wake_up(&pcm->write_wait); + break; + default: + break; + } + spin_unlock_irqrestore(&pcm->dsp_lock, flags); +} + +static void audio_prevent_sleep(struct pcm *audio) +{ + pr_debug("%s:\n", __func__); + wake_lock(&audio->wakelock); +} + +static void audio_allow_sleep(struct pcm *audio) +{ + pr_debug("%s:\n", __func__); + wake_unlock(&audio->wakelock); +} + +static int pcm_out_enable(struct pcm *pcm) +{ + if (atomic_read(&pcm->out_enabled)) + return 0; + return q6asm_run(pcm->ac, 0, 0, 0); +} + +static int pcm_out_disable(struct pcm *pcm) +{ + int rc = 0; + + if (atomic_read(&pcm->out_opened)) { + atomic_set(&pcm->out_enabled, 0); + atomic_set(&pcm->out_opened, 0); + rc = q6asm_cmd(pcm->ac, CMD_CLOSE); + + atomic_set(&pcm->out_stopped, 1); + wake_up(&pcm->write_wait); + } + return rc; +} + +static int config(struct pcm *pcm) +{ + int rc = 0; + if (!atomic_read(&pcm->out_prefill)) { + pr_debug("%s: pcm prefill\n", __func__); + rc = q6asm_audio_client_buf_alloc(IN, pcm->ac, + pcm->buffer_size, pcm->buffer_count); + if (rc < 0) { + pr_aud_err("Audio Start: Buffer Allocation failed \ + rc = %d\n", rc); + goto fail; + } + + rc = q6asm_media_format_block_pcm(pcm->ac, pcm->sample_rate, + pcm->channel_count); + if (rc < 0) + pr_aud_err("%s: CMD Format block 
failed\n", __func__); + + atomic_set(&pcm->out_prefill, 1); + atomic_set(&pcm->out_count, pcm->buffer_count); + } +fail: + return rc; +} + +static void pcm_event_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct pcm *pcm = (struct pcm *) private_data; + int rc = 0; + + switch (evt_id) { + case AUDDEV_EVT_STREAM_VOL_CHG: + pcm->volume = evt_payload->session_vol; + pr_debug("%s: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, " + "enabled = %d\n", __func__, pcm->volume, + atomic_read(&pcm->out_enabled)); + if (atomic_read(&pcm->out_enabled)) { + if (pcm->ac) { + rc = q6asm_set_volume(pcm->ac, pcm->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command" + "failed rc=%d\n", __func__, rc); + } + } + break; + default: + pr_aud_err("%s:ERROR:wrong event\n", __func__); + break; + } +} + +static long pcm_out_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct pcm *pcm = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return 0; + } + + mutex_lock(&pcm->lock); + switch (cmd) { + case AUDIO_SET_VOLUME: { + pr_aud_info("%s: AUDIO_SET_VOLUME, vol %lu\n", __func__, arg); + rc = q6asm_set_volume(pcm->ac, arg); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + break; + } + case AUDIO_START: { + pr_aud_info("%s: AUDIO_START\n", __func__); + rc = config(pcm); + if (rc) { + pr_aud_err("%s: Out Configuration failed\n", __func__); + rc = -EFAULT; + break; + } + + rc = pcm_out_enable(pcm); + if (rc) { + pr_aud_err("Out enable failed\n"); + rc = -EFAULT; + break; + } + audio_prevent_sleep(pcm); + atomic_set(&pcm->out_enabled, 1); + + rc = q6asm_set_volume(pcm->ac, pcm->volume); + if (rc < 0) + pr_aud_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + rc = q6asm_set_lrgain(pcm->ac, 0x2000, 0x2000); + if (rc < 0) + pr_aud_err("%s: Send channel gain failed rc=%d\n", + __func__, rc); + /* disable mute by default */ + rc = q6asm_set_mute(pcm->ac, 0); + if (rc < 0) + pr_aud_err("%s: Send mute command failed rc=%d\n", + __func__, rc); + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &pcm->ac->session, + sizeof(unsigned short))) + rc = -EFAULT; + break; + } + case AUDIO_STOP: + break; + case AUDIO_FLUSH: + break; + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + pr_aud_info("AUDIO_SET_CONFIG\n"); + if (copy_from_user(&config, (void *) arg, sizeof(config))) { + rc = -EFAULT; + break; + } + if (config.channel_count < 1 || config.channel_count > 2) { + rc = -EINVAL; + break; + } + if (config.sample_rate < 8000 || config.sample_rate > 48000) { + rc = -EINVAL; + break; + } + if (config.buffer_size < 128) { + rc = -EINVAL; + break; + } + pcm->sample_rate = config.sample_rate; + pcm->channel_count = config.channel_count; + pcm->buffer_size = config.buffer_size; + pcm->buffer_count = config.buffer_count; + pr_debug("%s:buffer_size:%d buffer_count:%d sample_rate:%d \ + channel_count:%d\n", __func__, pcm->buffer_size, + pcm->buffer_count, pcm->sample_rate, + pcm->channel_count); + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config config; + pr_aud_info("AUDIO_GET_CONFIG\n"); + config.buffer_size = pcm->buffer_size; + config.buffer_count = pcm->buffer_count; + config.sample_rate = pcm->sample_rate; + config.channel_count = pcm->channel_count; + config.unused[0] = 0; + config.unused[1] = 0; + 
config.unused[2] = 0; + if (copy_to_user((void *) arg, &config, sizeof(config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_EQ: { + struct msm_audio_eq_stream_config eq_config; + if (copy_from_user(&eq_config, (void *) arg, + sizeof(eq_config))) { + rc = -EFAULT; + break; + } + rc = q6asm_equalizer(pcm->ac, (void *) &eq_config); + if (rc < 0) + pr_aud_err("%s: EQUALIZER FAILED\n", __func__); + break; + } + default: + rc = -EINVAL; + } + mutex_unlock(&pcm->lock); + return rc; +} + +static int pcm_out_open(struct inode *inode, struct file *file) +{ + struct pcm *pcm; + int rc = 0; + + struct timespec ts; + struct rtc_time tm; + + pr_aud_info("[%s:%s] open\n", __MM_FILE__, __func__); + pcm = kzalloc(sizeof(struct pcm), GFP_KERNEL); + if (!pcm) { + pr_aud_info("%s: Failed to allocated memory\n", __func__); + return -ENOMEM; + } + + pcm->channel_count = 2; + pcm->sample_rate = 44100; + pcm->buffer_size = BUFSZ; + pcm->buffer_count = MAX_BUF; + pcm->stream_event = AUDDEV_EVT_STREAM_VOL_CHG; + pcm->volume = 0x2000; + + pcm->ac = q6asm_audio_client_alloc((app_cb)pcm_out_cb, (void *)pcm); + if (!pcm->ac) { + pr_aud_err("%s: Could not allocate memory\n", __func__); + rc = -ENOMEM; + goto fail; + } + + rc = q6asm_open_write(pcm->ac, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s: pcm out open failed for session %d\n", __func__, + pcm->ac->session); + rc = -EINVAL; + goto fail; + } + + mutex_init(&pcm->lock); + mutex_init(&pcm->write_lock); + init_waitqueue_head(&pcm->write_wait); + spin_lock_init(&pcm->dsp_lock); + atomic_set(&pcm->out_enabled, 0); + atomic_set(&pcm->out_stopped, 0); + atomic_set(&pcm->out_count, pcm->buffer_count); + atomic_set(&pcm->out_prefill, 0); + atomic_set(&pcm->out_opened, 1); + wake_lock_init(&pcm->wakelock, WAKE_LOCK_SUSPEND, "audio_pcm"); + + rc = auddev_register_evt_listner(pcm->stream_event, + AUDDEV_CLNT_DEC, + pcm->ac->session, + pcm_event_listner, + (void *)pcm); + if (rc < 0) { + pr_aud_err("%s: failed to register listner\n", __func__); + goto fail; + } + + file->private_data = pcm; + + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][play_music][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + pr_debug("[%s:%s] open session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + return 0; +fail: + if (pcm->ac) + q6asm_audio_client_free(pcm->ac); + kfree(pcm); + return rc; +} + +static ssize_t pcm_out_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct pcm *pcm = file->private_data; + const char __user *start = buf; + int xfer; + char *bufptr; + uint32_t idx; + void *data; + int rc = 0; + uint32_t size; + + if (!pcm->ac) + return -ENODEV; + + if (!atomic_read(&pcm->out_enabled)) { + rc = config(pcm); + if (rc < 0) + return rc; + } + + mutex_lock(&pcm->write_lock); + while (count > 0) { + rc = wait_event_timeout(pcm->write_wait, + (atomic_read(&pcm->out_count) || + atomic_read(&pcm->out_stopped)), 5 * HZ); + if (!rc) { + pr_aud_info("%s: wait_event_timeout failed for session %d\n", + __func__, pcm->ac->session); + goto fail; + } + + if (atomic_read(&pcm->out_stopped) && + !atomic_read(&pcm->out_count)) { + pr_aud_info("%s: pcm stopped out_count 0\n", __func__); + mutex_unlock(&pcm->write_lock); + return 0; + } + + data = q6asm_is_cpu_buf_avail(IN, pcm->ac, &size, &idx); + bufptr = data; + if (bufptr) { + xfer = count; + if (xfer > BUFSZ) + xfer = 
BUFSZ; + + if (copy_from_user(bufptr, buf, xfer)) { + rc = -EFAULT; + goto fail; + } + buf += xfer; + count -= xfer; + rc = q6asm_write(pcm->ac, xfer, 0, 0, NO_TIMESTAMP); + wmb(); + if (rc < 0) { + rc = -EFAULT; + goto fail; + } + } + atomic_dec(&pcm->out_count); + } + + rc = buf - start; +fail: + mutex_unlock(&pcm->write_lock); + return rc; +} + +static int pcm_out_release(struct inode *inode, struct file *file) +{ + struct pcm *pcm = file->private_data; + struct timespec ts; + struct rtc_time tm; + + if (pcm == NULL) { + pr_aud_err("%s: Nothing need to be released.\n", __func__); + return 0; + } + if (pcm->ac) { + pr_aud_info("[%s:%s] release session id[%d]\n", __MM_FILE__, + __func__, pcm->ac->session); + pcm_out_disable(pcm); + msm_clear_session_id(pcm->ac->session); + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, pcm->ac->session); + q6asm_audio_client_free(pcm->ac); + } + audio_allow_sleep(pcm); + wake_lock_destroy(&pcm->wakelock); + mutex_destroy(&pcm->lock); + mutex_destroy(&pcm->write_lock); + + kfree(pcm); + pr_aud_info("[%s:%s] release\n", __MM_FILE__, __func__); + + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_aud_info1("[ATS][stop_music][successful] at %lld \ + (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + return 0; +} + +static const struct file_operations pcm_out_fops = { + .owner = THIS_MODULE, + .open = pcm_out_open, + .write = pcm_out_write, + .release = pcm_out_release, + .unlocked_ioctl = pcm_out_ioctl, +}; + +struct miscdevice pcm_out_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_pcm_out", + .fops = &pcm_out_fops, +}; + +static int __init pcm_out_init(void) +{ + return misc_register(&pcm_out_misc); +} + +device_initcall(pcm_out_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6adm.c b/arch/arm/mach-msm/qdsp6v3/q6adm.c new file mode 100644 index 00000000000..b2e015180f2 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6adm.c @@ -0,0 +1,651 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" +#include "rtac.h" + +#define TIMEOUT_MS 1000 +#define AUDIO_RX 0x0 +#define AUDIO_TX 0x1 +#define RESET_COPP_ID 99 +#define INVALID_COPP_ID 0xFF + +struct adm_ctl { + void *apr; + atomic_t copp_id[AFE_MAX_PORTS]; + atomic_t copp_cnt[AFE_MAX_PORTS]; + atomic_t copp_stat[AFE_MAX_PORTS]; + wait_queue_head_t wait; +}; + +static struct adm_ctl this_adm; + +static int32_t adm_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *payload; + int i, index; + payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n", + data->reset_event, data->reset_proc, + this_adm.apr); + if (this_adm.apr) { + apr_reset(this_adm.apr); + for (i = 0; i < AFE_MAX_PORTS; i++) { + atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); + atomic_set(&this_adm.copp_cnt[i], 0); + atomic_set(&this_adm.copp_stat[i], 0); + } + this_adm.apr = NULL; + } + return 0; + } + + pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__, + data->opcode, payload[0], payload[1], + data->payload_size); + + if (data->payload_size) { + index = afe_get_port_index(data->token); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_debug("%s: Port ID %d, index %d\n", __func__, + data->token, index); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + pr_debug("APR_BASIC_RSP_RESULT\n"); + switch (payload[0]) { + case ADM_CMD_SET_PARAMS: +#ifdef CONFIG_MSM8X60_RTAC + if (rtac_make_adm_callback(payload, + data->payload_size)) + break; +#endif + case ADM_CMD_COPP_CLOSE: + case ADM_CMD_MEMORY_MAP: + case ADM_CMD_MEMORY_UNMAP: + case ADM_CMD_MEMORY_MAP_REGIONS: + case ADM_CMD_MEMORY_UNMAP_REGIONS: + case ADM_CMD_MATRIX_MAP_ROUTINGS: + pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n"); + atomic_set(&this_adm.copp_stat[index], 1); + wake_up(&this_adm.wait); + break; + default: + pr_aud_err("%s: Unknown Cmd: 0x%x\n", __func__, + payload[0]); + break; + } + return 0; + } + + switch (data->opcode) { + case ADM_CMDRSP_COPP_OPEN: { + struct adm_copp_open_respond *open = data->payload; + if (open->copp_id == INVALID_COPP_ID) { + pr_aud_err("%s: invalid coppid rxed %d\n", + __func__, open->copp_id); + atomic_set(&this_adm.copp_stat[index], 1); + wake_up(&this_adm.wait); + break; + } + atomic_set(&this_adm.copp_id[index], open->copp_id); + atomic_set(&this_adm.copp_stat[index], 1); + pr_debug("%s: coppid rxed=%d\n", __func__, + open->copp_id); + wake_up(&this_adm.wait); + } + break; +#ifdef CONFIG_MSM8X60_RTAC + case ADM_CMDRSP_GET_PARAMS: + pr_debug("ADM_CMDRSP_GET_PARAMS\n"); + rtac_make_adm_callback(payload, + data->payload_size); + break; +#endif + default: + pr_aud_err("%s: Unknown cmd:0x%x\n", __func__, + data->opcode); + break; + } + } + return 0; +} + +void send_cal(int port_id, struct acdb_cal_block *aud_cal) +{ + s32 result; + struct adm_set_params_command adm_params; + int index = afe_get_port_index(port_id); + + pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index); + + if (!aud_cal || aud_cal->cal_size == 0) { + pr_aud_info("%s: No calibration data to send!\n", __func__); + goto done; + } + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + goto done; + } + + adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(20), APR_PKT_VER); + adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + 
sizeof(adm_params)); + adm_params.hdr.src_svc = APR_SVC_ADM; + adm_params.hdr.src_domain = APR_DOMAIN_APPS; + adm_params.hdr.src_port = port_id; + adm_params.hdr.dest_svc = APR_SVC_ADM; + adm_params.hdr.dest_domain = APR_DOMAIN_ADSP; + adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); + adm_params.hdr.token = port_id; + adm_params.hdr.opcode = ADM_CMD_SET_PARAMS; + adm_params.payload = aud_cal->cal_paddr; + adm_params.payload_size = aud_cal->cal_size; + + atomic_set(&this_adm.copp_stat[index], 0); + pr_aud_info("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n", + __func__, adm_params.payload, adm_params.payload_size); + result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params); + if (result < 0) { + pr_aud_err("%s: Set params failed port = %d payload = 0x%x\n", + __func__, port_id, aud_cal->cal_paddr); + goto done; + } + /* Wait for the callback */ + result = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!result) + pr_aud_err("%s: Set params timed out port = %d, payload = 0x%x\n", + __func__, port_id, aud_cal->cal_paddr); +done: + return; +} + +void send_adm_cal(int port_id, int path) +{ + s32 acdb_path; + struct acdb_cal_block aud_cal; + + pr_aud_info("%s\n", __func__); + + /* Maps audio_dev_ctrl path definition to ACDB definition */ + acdb_path = path - 1; + if ((acdb_path >= NUM_AUDPROC_BUFFERS) || + (acdb_path < 0)) { + pr_aud_err("%s: Path is not RX or TX, path = %d\n", + __func__, path); + goto done; + } + + pr_aud_info("%s: Sending audproc cal, acdb_path %d\n", + __func__, acdb_path); + get_audproc_cal(acdb_path, &aud_cal); + send_cal(port_id, &aud_cal); + + pr_aud_info("%s: Sending audvol cal, acdb_path %d\n", + __func__, acdb_path); + get_audvol_cal(acdb_path, &aud_cal); + send_cal(port_id, &aud_cal); +done: + return; +} + +int adm_open(int port_id, int path, int rate, int channel_mode, int topology) +{ + struct adm_copp_open_command open; + int ret = 0; + int index; + + pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__, + port_id, path, rate, channel_mode); + pr_aud_info("Topology = %x\n", topology); + + if (afe_validate_port(port_id) < 0) { + pr_aud_err("%s port idi[%d] is invalid\n", __func__, port_id); + return -ENODEV; + } + + index = afe_get_port_index(port_id); + pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); + + if (this_adm.apr == NULL) { + this_adm.apr = apr_register("ADSP", "ADM", adm_callback, + 0xFFFFFFFF, &this_adm); + if (this_adm.apr == NULL) { + pr_aud_err("%s: Unable to register ADM\n", __func__); + ret = -ENODEV; + return ret; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_adm_handle(this_adm.apr); +#endif + } + + + /* Create a COPP if port id are not enabled */ + if (atomic_read(&this_adm.copp_cnt[index]) == 0) { + + open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + open.hdr.pkt_size = sizeof(open); + open.hdr.src_svc = APR_SVC_ADM; + open.hdr.src_domain = APR_DOMAIN_APPS; + open.hdr.src_port = port_id; + open.hdr.dest_svc = APR_SVC_ADM; + open.hdr.dest_domain = APR_DOMAIN_ADSP; + open.hdr.dest_port = port_id; + open.hdr.token = port_id; + open.hdr.opcode = ADM_CMD_COPP_OPEN; + + open.mode = path; + open.endpoint_id1 = port_id; + open.endpoint_id2 = 0xFFFF; + open.topology_id = topology; + + open.channel_config = channel_mode & 0x00FF; + open.rate = rate; + + pr_debug("%s: channel_config=%d port_id=%d rate=%d\ + topology_id=0x%X\n", __func__, open.channel_config,\ + open.endpoint_id1, open.rate,\ 
+ open.topology_id); + + atomic_set(&this_adm.copp_stat[index], 0); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); + if (ret < 0) { + pr_aud_err("%s:ADM enable for port %d failed\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + /* Wait for the callback with copp id */ + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s ADM open failed for port %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + } + atomic_inc(&this_adm.copp_cnt[index]); + return 0; + +fail_cmd: + + return ret; +} + +int adm_matrix_map(int session_id, int path, int num_copps, + unsigned int *port_id, int copp_id) +{ + struct adm_routings_command route; + int ret = 0, i = 0; + /* Assumes port_ids have already been validated during adm_open */ + int index = afe_get_port_index(copp_id); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n", + __func__, session_id, path, num_copps, port_id[0]); + + route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + route.hdr.pkt_size = sizeof(route); + route.hdr.src_svc = 0; + route.hdr.src_domain = APR_DOMAIN_APPS; + route.hdr.src_port = copp_id; + route.hdr.dest_svc = APR_SVC_ADM; + route.hdr.dest_domain = APR_DOMAIN_ADSP; + route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); + route.hdr.token = copp_id; + route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS; + route.num_sessions = 1; + route.session[0].id = session_id; + route.session[0].num_copps = num_copps; + + for (i = 0; i < num_copps; i++) { + int tmp; + tmp = afe_get_port_index(port_id[i]); + + pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i, + port_id[i], tmp); + if (tmp < 0 || tmp >= AFE_MAX_PORTS) { + pr_aud_err("afe_get_port_index return invalid %d\n", tmp); + ret = -EINVAL; + goto fail_cmd; + } + route.session[0].copp_id[i] = + atomic_read(&this_adm.copp_id[tmp]); + } + if (num_copps % 2) + route.session[0].copp_id[i] = 0; + + switch (path) { + case 0x1: + route.path = AUDIO_RX; + break; + case 0x2: + case 0x3: + route.path = AUDIO_TX; + break; + default: + pr_aud_err("%s: Wrong path set[%d]\n", __func__, path); + break; + } + atomic_set(&this_adm.copp_stat[index], 0); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route); + if (ret < 0) { + pr_aud_err("%s: ADM routing for port %d failed\n", + __func__, port_id[0]); + ret = -EINVAL; + goto fail_cmd; + } + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: ADM cmd Route failed for port %d\n", + __func__, port_id[0]); + ret = -EINVAL; + goto fail_cmd; + } + + for (i = 0; i < num_copps; i++) + send_adm_cal(port_id[i], path); + +#ifdef CONFIG_MSM8X60_RTAC + for (i = 0; i < num_copps; i++) + rtac_add_adm_device(port_id[i], session_id); +#endif + return 0; + +fail_cmd: + + return ret; +} + +int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id, + uint32_t *bufsz, uint32_t bufcnt) +{ + struct adm_cmd_memory_map_regions *mmap_regions = NULL; + struct adm_memory_map_regions *mregions = NULL; + void *mmap_region_cmd = NULL; + void *payload = NULL; + int ret = 0; + int i = 0; + int cmd_size = 0; + + pr_aud_info("%s\n", __func__); + if (this_adm.apr == NULL) { + this_adm.apr = apr_register("ADSP", "ADM", adm_callback, + 0xFFFFFFFF, 
&this_adm); + if (this_adm.apr == NULL) { + pr_aud_err("%s: Unable to register ADM\n", __func__); + ret = -ENODEV; + return ret; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_adm_handle(this_adm.apr); +#endif + } + + cmd_size = sizeof(struct adm_cmd_memory_map_regions) + + sizeof(struct adm_memory_map_regions) * bufcnt; + + mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!mmap_region_cmd) { + pr_aud_err("%s: allocate mmap_region_cmd failed\n", __func__); + return -ENOMEM; + } + mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd; + mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mmap_regions->hdr.pkt_size = cmd_size; + mmap_regions->hdr.src_port = 0; + mmap_regions->hdr.dest_port = 0; + mmap_regions->hdr.token = 0; + mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS; + mmap_regions->mempool_id = mempool_id & 0x00ff; + mmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("%s: map_regions->nregions = %d\n", __func__, + mmap_regions->nregions); + payload = ((u8 *) mmap_region_cmd + + sizeof(struct adm_cmd_memory_map_regions)); + mregions = (struct adm_memory_map_regions *)payload; + + for (i = 0; i < bufcnt; i++) { + mregions->phys = buf_add[i]; + mregions->buf_size = bufsz[i]; + ++mregions; + } + + atomic_set(&this_adm.copp_stat[0], 0); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd); + if (ret < 0) { + pr_aud_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, + mmap_regions->hdr.opcode, ret); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[0]), 5 * HZ); + if (!ret) { + pr_aud_err("%s: timeout. waited for memory_map\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + kfree(mmap_region_cmd); + return ret; +} + +int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, + uint32_t bufcnt) +{ + struct adm_cmd_memory_unmap_regions *unmap_regions = NULL; + struct adm_memory_unmap_regions *mregions = NULL; + void *unmap_region_cmd = NULL; + void *payload = NULL; + int ret = 0; + int i = 0; + int cmd_size = 0; + + pr_aud_info("%s\n", __func__); + + if (this_adm.apr == NULL) { + pr_aud_err("%s APR handle NULL\n", __func__); + return -EINVAL; + } + + cmd_size = sizeof(struct adm_cmd_memory_unmap_regions) + + sizeof(struct adm_memory_unmap_regions) * bufcnt; + + unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!unmap_region_cmd) { + pr_aud_err("%s: allocate unmap_region_cmd failed\n", __func__); + return -ENOMEM; + } + unmap_regions = (struct adm_cmd_memory_unmap_regions *) + unmap_region_cmd; + unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + unmap_regions->hdr.pkt_size = cmd_size; + unmap_regions->hdr.src_port = 0; + unmap_regions->hdr.dest_port = 0; + unmap_regions->hdr.token = 0; + unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS; + unmap_regions->nregions = bufcnt & 0x00ff; + unmap_regions->reserved = 0; + pr_debug("%s: unmap_regions->nregions = %d\n", __func__, + unmap_regions->nregions); + payload = ((u8 *) unmap_region_cmd + + sizeof(struct adm_cmd_memory_unmap_regions)); + mregions = (struct adm_memory_unmap_regions *)payload; + + for (i = 0; i < bufcnt; i++) { + mregions->phys = buf_add[i]; + ++mregions; + } + atomic_set(&this_adm.copp_stat[0], 0); + ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd); + if (ret < 0) { + pr_aud_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, + unmap_regions->hdr.opcode, 
ret); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[0]), 5 * HZ); + if (!ret) { + pr_aud_err("%s: timeout. waited for memory_unmap\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + kfree(unmap_region_cmd); + return ret; +} + +#ifdef CONFIG_MSM8X60_RTAC +int adm_get_copp_id(int port_id) +{ + pr_debug("%s\n", __func__); + + if (port_id < 0) { + pr_aud_err("%s: invalid port_id = %d\n", __func__, port_id); + return -EINVAL; + } + + return atomic_read(&this_adm.copp_id[port_id]); +} +#endif + +int adm_close(int port_id) +{ + struct apr_hdr close; + + int ret = 0; + int index = afe_get_port_index(port_id); + + if (index < 0 || index >= AFE_MAX_PORTS) { + pr_aud_err("%s: invalid index %d\n", __func__, index); + return -EINVAL; + } + + pr_aud_info("%s port_id=%d index %d\n", __func__, port_id, index); + + if (!(atomic_read(&this_adm.copp_cnt[index]))) { + pr_aud_err("%s: copp count for port[%d]is 0\n", __func__, port_id); + + goto fail_cmd; + } + atomic_dec(&this_adm.copp_cnt[index]); + if (!(atomic_read(&this_adm.copp_cnt[index]))) { + + close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + close.pkt_size = sizeof(close); + close.src_svc = APR_SVC_ADM; + close.src_domain = APR_DOMAIN_APPS; + close.src_port = port_id; + close.dest_svc = APR_SVC_ADM; + close.dest_domain = APR_DOMAIN_ADSP; + close.dest_port = atomic_read(&this_adm.copp_id[index]); + close.token = port_id; + close.opcode = ADM_CMD_COPP_CLOSE; + + atomic_set(&this_adm.copp_id[index], RESET_COPP_ID); + atomic_set(&this_adm.copp_stat[index], 0); + + + pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n", + __func__, + atomic_read(&this_adm.copp_id[index]), + port_id, index, + atomic_read(&this_adm.copp_cnt[index])); + + ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); + if (ret < 0) { + pr_aud_err("%s ADM close failed\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_adm.wait, + atomic_read(&this_adm.copp_stat[index]), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_info("%s: ADM cmd Route failed for port %d\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_adm_device(port_id); +#endif + } + +fail_cmd: + return ret; +} + +static int __init adm_init(void) +{ + int i = 0; + pr_aud_info("%s\n", __func__); + init_waitqueue_head(&this_adm.wait); + this_adm.apr = NULL; + + for (i = 0; i < AFE_MAX_PORTS; i++) { + atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); + atomic_set(&this_adm.copp_cnt[i], 0); + atomic_set(&this_adm.copp_stat[i], 0); + } + return 0; +} + +device_initcall(adm_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6adm.h b/arch/arm/mach-msm/qdsp6v3/q6adm.h new file mode 100644 index 00000000000..6d01738b7a8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6adm.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __Q6_ADM_H__ +#define __Q6_ADM_H__ +#include + +/* multiple copp per stream. */ +struct route_payload { + unsigned int copp_ids[AFE_MAX_PORTS]; + unsigned short num_copps; + unsigned int session_id; +}; + +int adm_open(int port, int path, int rate, int mode, int topology); + +int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id, + uint32_t *bufsz, uint32_t bufcnt); + +int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, + uint32_t bufcnt); + +int adm_close(int port); + +int adm_matrix_map(int session_id, int path, int num_copps, + unsigned int *port_id, int copp_id); + +#ifdef CONFIG_MSM8X60_RTAC +int adm_get_copp_id(int port_id); +#endif + +#endif /* __Q6_ADM_H__ */ diff --git a/arch/arm/mach-msm/qdsp6v3/q6afe.c b/arch/arm/mach-msm/qdsp6v3/q6afe.c new file mode 100644 index 00000000000..a51f6ab83a7 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6afe.c @@ -0,0 +1,687 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct afe_ctl { + void *apr; + atomic_t state; + wait_queue_head_t wait; + struct task_struct *task; +}; + +static struct afe_ctl this_afe; + +#define TIMEOUT_MS 1000 +#define Q6AFE_MAX_VOLUME 0x3FFF + + +static int32_t afe_callback(struct apr_client_data *data, void *priv) +{ + if (data->opcode == RESET_EVENTS) { + pr_debug("q6afe: reset event = %d %d apr[%p]\n", + data->reset_event, data->reset_proc, this_afe.apr); + if (this_afe.apr) { + apr_reset(this_afe.apr); + atomic_set(&this_afe.state, 0); + this_afe.apr = NULL; + } + /* send info to user */ + pr_debug("task_name = %s pid = %d\n", + this_afe.task->comm, this_afe.task->pid); + send_sig(SIGUSR1, this_afe.task, 0); + } + if (data->payload_size) { + uint32_t *payload; + payload = data->payload; + pr_debug("%s: cmd = 0x%x status = 0x%x\n", __func__, + payload[0], payload[1]); + if (data->opcode == APR_BASIC_RSP_RESULT) { + switch (payload[0]) { + case AFE_PORT_AUDIO_IF_CONFIG: + case AFE_PORT_CMD_STOP: + case AFE_PORT_CMD_START: + case AFE_PORT_CMD_LOOPBACK: + case AFE_PORT_CMD_SIDETONE_CTL: + case AFE_PORT_CMD_SET_PARAM: + case AFE_PSEUDOPORT_CMD_START: + case AFE_PSEUDOPORT_CMD_STOP: + atomic_set(&this_afe.state, 0); + wake_up(&this_afe.wait); + break; + default: + pr_aud_err("Unknown cmd 0x%x\n", + payload[0]); + break; + } + } + } + return 0; +} + +int afe_validate_port(u16 port_id) +{ + int ret; + + switch (port_id) { + case PRIMARY_I2S_RX: + case PRIMARY_I2S_TX: + case PCM_RX: + case PCM_TX: + case SECONDARY_I2S_RX: + case SECONDARY_I2S_TX: + case MI2S_RX: + case MI2S_TX: + case HDMI_RX: + case RSVD_2: + case RSVD_3: + case DIGI_MIC_TX: + case VOICE_RECORD_RX: + case VOICE_RECORD_TX: + case VOICE_PLAYBACK_TX: + { + ret = 0; + break; + } + + default: + ret = -EINVAL; + } + + return ret; +} + +int afe_get_port_index(u16 port_id) +{ + switch (port_id) { + case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX; + case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX; + case PCM_RX: return IDX_PCM_RX; + case PCM_TX: return IDX_PCM_TX; + case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX; + case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX; + case MI2S_RX: return IDX_MI2S_RX; + case MI2S_TX: return IDX_MI2S_TX; + case HDMI_RX: return IDX_HDMI_RX; + case RSVD_2: return IDX_RSVD_2; + case RSVD_3: return IDX_RSVD_3; + case DIGI_MIC_TX: return IDX_DIGI_MIC_TX; + case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX; + case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX; + case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX; + default: return -EINVAL; + } +} + +int afe_open(u16 port_id, union afe_port_config *afe_config, int rate) +{ + struct afe_port_start_command start; + struct afe_audioif_config_command config; + int ret = 0; + + if (!afe_config) { + pr_aud_err("%s: Error, no configuration data\n", __func__); + ret = -EINVAL; + return ret; + } + + pr_aud_info("%s: %d %d\n", __func__, port_id, rate); + + if (this_afe.apr == NULL) { + this_afe.apr = apr_register("ADSP", "AFE", afe_callback, + 0xFFFFFFFF, &this_afe); + pr_aud_info("%s: Register AFE\n", __func__); + if (this_afe.apr == NULL) { + pr_aud_err("%s: Unable to register AFE\n", __func__); + ret = -ENODEV; + return ret; + } + } + + config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + config.hdr.pkt_size = sizeof(config); + config.hdr.src_port = 0; + config.hdr.dest_port = 0; + config.hdr.token = 0; + config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG; + + if 
(afe_validate_port(port_id) < 0) { + + pr_aud_err("%s: Failed : Invalid Port id = %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + config.port_id = port_id; + config.port = *afe_config; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + start.hdr.pkt_size = sizeof(start); + start.hdr.src_port = 0; + start.hdr.dest_port = 0; + start.hdr.token = 0; + start.hdr.opcode = AFE_PORT_CMD_START; + start.port_id = port_id; + start.gain = 0x2000; + start.sample_rate = rate; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + + if (this_afe.task != current) + this_afe.task = current; + + pr_debug("task_name = %s pid = %d\n", + this_afe.task->comm, this_afe.task->pid); + return 0; +fail_cmd: + return ret; +} + +int afe_loopback(u16 enable, u16 rx_port, u16 tx_port) +{ + struct afe_loopback_command lb_cmd; + int ret = 0; + if (this_afe.apr == NULL) { + pr_aud_err("%s:AFE is not opened\n", __func__); + ret = -1; + goto done; + } + lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(20), APR_PKT_VER); + lb_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(lb_cmd) - APR_HDR_SIZE); + lb_cmd.hdr.src_port = 0; + lb_cmd.hdr.dest_port = 0; + lb_cmd.hdr.token = 0; + lb_cmd.hdr.opcode = AFE_PORT_CMD_LOOPBACK; + lb_cmd.tx_port_id = tx_port; + lb_cmd.rx_port_id = rx_port; + lb_cmd.mode = 0xFFFF; + lb_cmd.enable = (enable ? 1 : 0); + atomic_set(&this_afe.state, 1); + + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &lb_cmd); + if (ret < 0) { + pr_aud_err("AFE loopback failed\n"); + ret = -EINVAL; + goto done; + } + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + } +done: + return ret; +} + + +int afe_loopback_gain(u16 port_id, u16 volume) +{ + struct afe_port_cmd_set_param set_param; + int ret = 0; + + if (this_afe.apr == NULL) { + pr_aud_err("%s: AFE is not opened\n", __func__); + ret = -EPERM; + goto fail_cmd; + } + + if (afe_validate_port(port_id) < 0) { + + pr_aud_err("%s: Failed : Invalid Port id = %d\n", __func__, + port_id); + ret = -EINVAL; + goto fail_cmd; + } + + /* RX ports numbers are even .TX ports numbers are odd. */ + if (port_id % 2 == 0) { + pr_aud_err("%s: Failed : afe loopback gain only for TX ports." 
+ " port_id %d\n", __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + + pr_debug("%s: %d %hX\n", __func__, port_id, volume); + + set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + set_param.hdr.pkt_size = sizeof(set_param); + set_param.hdr.src_port = 0; + set_param.hdr.dest_port = 0; + set_param.hdr.token = 0; + set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM; + + set_param.port_id = port_id; + set_param.payload_size = sizeof(struct afe_param_payload); + set_param.payload_address = 0; + + set_param.payload.module_id = AFE_MODULE_ID_PORT_INFO; + set_param.payload.param_id = AFE_PARAM_ID_LOOPBACK_GAIN; + set_param.payload.param_size = sizeof(struct afe_param_loopback_gain); + set_param.payload.reserved = 0; + + set_param.payload.param.loopback_gain.gain = volume; + set_param.payload.param.loopback_gain.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param); + if (ret < 0) { + pr_aud_err("%s: AFE param set failed for port %d\n", + __func__, port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (ret < 0) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + return 0; +fail_cmd: + return ret; +} + +int afe_start_pseudo_port(u16 port_id) +{ + int ret = 0; + struct afe_pseudoport_start_command start; + + pr_aud_info("%s: port_id=%d\n", __func__, port_id); + + if (this_afe.apr == NULL) { + this_afe.apr = apr_register("ADSP", "AFE", afe_callback, + 0xFFFFFFFF, &this_afe); + pr_aud_info("%s: Register AFE\n", __func__); + if (this_afe.apr == NULL) { + pr_aud_err("%s: Unable to register AFE\n", __func__); + ret = -ENODEV; + return ret; + } + } + + start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + start.hdr.pkt_size = sizeof(start); + start.hdr.src_port = 0; + start.hdr.dest_port = 0; + start.hdr.token = 0; + start.hdr.opcode = AFE_PSEUDOPORT_CMD_START; + start.port_id = port_id; + start.timing = 1; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start); + if (ret < 0) { + pr_aud_err("%s: AFE enable for port %d failed %d\n", + __func__, port_id, ret); + ret = -EINVAL; + return ret; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + return ret; + } + + return 0; +} + +int afe_stop_pseudo_port(u16 port_id) +{ + int ret = 0; + struct afe_pseudoport_stop_command stop; + + pr_aud_info("%s: port_id=%d\n", __func__, port_id); + + if (this_afe.apr == NULL) { + pr_aud_err("%s: AFE is already closed\n", __func__); + ret = -EINVAL; + return ret; + } + + stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + stop.hdr.pkt_size = sizeof(stop); + stop.hdr.src_port = 0; + stop.hdr.dest_port = 0; + stop.hdr.token = 0; + stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP; + stop.port_id = port_id; + stop.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); + if (ret < 0) { + pr_aud_err("%s: AFE close failed %d\n", __func__, ret); + ret = -EINVAL; + return ret; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event 
timeout\n", __func__); + ret = -EINVAL; + return ret; + } + + return 0; +} + + +#ifdef CONFIG_DEBUG_FS +static struct dentry *debugfs_afelb; +static struct dentry *debugfs_afelb_gain; + +static int afe_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + pr_aud_info("debug intf %s\n", (char *) file->private_data); + return 0; +} + +static int afe_get_parameters(char *buf, long int *param1, int num_of_par) +{ + char *token; + int base, cnt; + + token = strsep(&buf, " "); + + for (cnt = 0; cnt < num_of_par; cnt++) { + if (token != NULL) { + if ((token[1] == 'x') || (token[1] == 'X')) + base = 16; + else + base = 10; + + if (strict_strtoul(token, base, ¶m1[cnt]) != 0) + return -EINVAL; + + token = strsep(&buf, " "); + } else + return -EINVAL; + } + return 0; +} +#define AFE_LOOPBACK_ON (1) +#define AFE_LOOPBACK_OFF (0) +static ssize_t afe_debug_write(struct file *filp, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char *lb_str = filp->private_data; + char lbuf[32]; + int rc; + unsigned long param[5]; + + if (cnt > sizeof(lbuf) - 1) + return -EINVAL; + + rc = copy_from_user(lbuf, ubuf, cnt); + if (rc) + return -EFAULT; + + lbuf[cnt] = '\0'; + + if (!strcmp(lb_str, "afe_loopback")) { + rc = afe_get_parameters(lbuf, param, 3); + if (!rc) { + pr_aud_info("%s %lu %lu %lu\n", lb_str, param[0], param[1], + param[2]); + + if ((param[0] != AFE_LOOPBACK_ON) && (param[0] != + AFE_LOOPBACK_OFF)) { + pr_aud_err("%s: Error, parameter 0 incorrect\n", + __func__); + rc = -EINVAL; + goto afe_error; + } + if ((afe_validate_port(param[1]) < 0) || + (afe_validate_port(param[2])) < 0) { + pr_aud_err("%s: Error, invalid afe port\n", + __func__); + } + if (this_afe.apr == NULL) { + pr_aud_err("%s: Error, AFE not opened\n", __func__); + rc = -EINVAL; + } else { + rc = afe_loopback(param[0], param[1], param[2]); + } + } else { + pr_aud_err("%s: Error, invalid parameters\n", __func__); + rc = -EINVAL; + } + + } else if (!strcmp(lb_str, "afe_loopback_gain")) { + rc = afe_get_parameters(lbuf, param, 2); + if (!rc) { + pr_aud_info("%s %lu %lu\n", lb_str, param[0], param[1]); + + if (afe_validate_port(param[0]) < 0) { + pr_aud_err("%s: Error, invalid afe port\n", + __func__); + rc = -EINVAL; + goto afe_error; + } + + if (param[1] < 0 || param[1] > 100) { + pr_aud_err("%s: Error, volume shoud be 0 to 100" + " percentage param = %lu\n", + __func__, param[1]); + rc = -EINVAL; + goto afe_error; + } + + param[1] = (Q6AFE_MAX_VOLUME * param[1]) / 100; + + if (this_afe.apr == NULL) { + pr_aud_err("%s: Error, AFE not opened\n", __func__); + rc = -EINVAL; + } else { + rc = afe_loopback_gain(param[0], param[1]); + } + } else { + pr_aud_err("%s: Error, invalid parameters\n", __func__); + rc = -EINVAL; + } + } + +afe_error: + if (rc == 0) + rc = cnt; + else + pr_aud_err("%s: rc = %d\n", __func__, rc); + + return rc; +} + +static const struct file_operations afe_debug_fops = { + .open = afe_debug_open, + .write = afe_debug_write +}; +#endif +int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain) +{ + struct afe_port_sidetone_command cmd_sidetone; + int ret = 0; + + pr_aud_info("%s: tx_port_id:%d rx_port_id:%d enable:%d gain:%d\n", __func__, + tx_port_id, rx_port_id, enable, gain); + cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone); + cmd_sidetone.hdr.src_port = 0; + cmd_sidetone.hdr.dest_port = 0; + cmd_sidetone.hdr.token = 0; + 
cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SIDETONE_CTL; + cmd_sidetone.tx_port_id = tx_port_id; + cmd_sidetone.rx_port_id = rx_port_id; + cmd_sidetone.gain = gain; + cmd_sidetone.enable = enable; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &cmd_sidetone); + if (ret < 0) { + pr_aud_err("%s: AFE sidetone failed for tx_port:%d rx_port:%d\n", + __func__, tx_port_id, rx_port_id); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (ret < 0) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } + return 0; +fail_cmd: + return ret; +} + +int afe_close(int port_id) +{ + struct afe_port_stop_command stop; + int ret = 0; + + if (this_afe.apr == NULL) { + pr_aud_err("AFE is already closed\n"); + ret = -EINVAL; + goto fail_cmd; + } + pr_debug("%s: port_id=%d\n", __func__, port_id); + stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + stop.hdr.pkt_size = sizeof(stop); + stop.hdr.src_port = 0; + stop.hdr.dest_port = 0; + stop.hdr.token = 0; + stop.hdr.opcode = AFE_PORT_CMD_STOP; + stop.port_id = port_id; + stop.reserved = 0; + + atomic_set(&this_afe.state, 1); + ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); + + if (ret < 0) { + pr_aud_err("AFE close failed\n"); + ret = -EINVAL; + goto fail_cmd; + } + + ret = wait_event_timeout(this_afe.wait, + (atomic_read(&this_afe.state) == 0), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + ret = -EINVAL; + goto fail_cmd; + } +fail_cmd: + return ret; +} + +static int __init afe_init(void) +{ + pr_aud_info("%s:\n", __func__); + init_waitqueue_head(&this_afe.wait); + atomic_set(&this_afe.state, 0); + this_afe.apr = NULL; +#ifdef CONFIG_DEBUG_FS + debugfs_afelb = debugfs_create_file("afe_loopback", + 0644, NULL, (void *) "afe_loopback", + &afe_debug_fops); + + debugfs_afelb_gain = debugfs_create_file("afe_loopback_gain", + 0644, NULL, (void *) "afe_loopback_gain", + &afe_debug_fops); + + +#endif + return 0; +} + +static void __exit afe_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + if (debugfs_afelb) + debugfs_remove(debugfs_afelb); + if (debugfs_afelb_gain) + debugfs_remove(debugfs_afelb_gain); +#endif +} + +device_initcall(afe_init); +__exitcall(afe_exit); diff --git a/arch/arm/mach-msm/qdsp6v3/q6asm.c b/arch/arm/mach-msm/qdsp6v3/q6asm.c new file mode 100644 index 00000000000..33004bfc4a1 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6asm.c @@ -0,0 +1,2548 @@ + +/* + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rtac.h" + +#define TRUE 0x01 +#define FALSE 0x00 +#define READDONE_IDX_STATUS 0 +#define READDONE_IDX_BUFFER 1 +#define READDONE_IDX_SIZE 2 +#define READDONE_IDX_OFFSET 3 +#define READDONE_IDX_MSW_TS 4 +#define READDONE_IDX_LSW_TS 5 +#define READDONE_IDX_FLAGS 6 +#define READDONE_IDX_NUMFRAMES 7 +#define READDONE_IDX_ID 8 + +static DEFINE_MUTEX(session_lock); + +/* session id: 0 reserved */ +static struct audio_client *session[SESSION_MAX+1]; +static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv); +static int32_t q6asm_callback(struct apr_client_data *data, void *priv); +static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg); +static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg); +static int q6asm_memory_map_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt); +static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt); + +static void q6asm_reset_buf_state(struct audio_client *ac); + +struct asm_mmap { + atomic_t ref_cnt; + atomic_t cmd_state; + wait_queue_head_t cmd_wait; + void *apr; +}; + +static struct asm_mmap this_mmap; + +static int q6asm_session_alloc(struct audio_client *ac) +{ + int n; + mutex_lock(&session_lock); + for (n = 1; n <= SESSION_MAX; n++) { + if (!session[n]) { + session[n] = ac; + mutex_unlock(&session_lock); + return n; + } + } + mutex_unlock(&session_lock); + return -ENOMEM; +} + +static void q6asm_session_free(struct audio_client *ac) +{ + pr_debug("%s: sessionid[%d]\n", __func__, ac->session); + mutex_lock(&session_lock); + session[ac->session] = 0; + mutex_unlock(&session_lock); + ac->session = 0; + return; +} + +int q6asm_audio_client_buf_free(unsigned int dir, + struct audio_client *ac) +{ + struct audio_port_data *port; + int cnt = 0; + int rc = 0; + pr_debug("%s: Session id %d\n", __func__, ac->session); + mutex_lock(&ac->cmd_lock); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + if (!port->buf) { + mutex_unlock(&ac->cmd_lock); + return 0; + } + cnt = port->max_buf_cnt - 1; + + if (cnt >= 0) { + rc = q6asm_memory_unmap_regions(ac, dir, + port->buf[0].size, + port->max_buf_cnt); + if (rc < 0) + pr_aud_err("%s CMD Memory_unmap_regions failed\n", + __func__); + } + + while (cnt >= 0) { + if (port->buf[cnt].data) { + pr_debug("data[%p]phys[%p][%p] cnt[%d]\n", + (void *)port->buf[cnt].data, + (void *)port->buf[cnt].phys, + (void *)&port->buf[cnt].phys, cnt); + dma_free_coherent(NULL, port->buf[cnt].size, + port->buf[cnt].data, + port->buf[cnt].phys); + port->buf[cnt].data = NULL; + port->buf[cnt].phys = 0; + --(port->max_buf_cnt); + } + --cnt; + } + kfree(port->buf); + port->buf = NULL; + } + mutex_unlock(&ac->cmd_lock); + return 0; +} + +int q6asm_audio_client_buf_free_contiguous(unsigned int dir, + struct audio_client *ac) +{ + struct audio_port_data *port; + int cnt = 0; + int rc = 0; + pr_debug("%s: Session id %d\n", __func__, ac->session); + mutex_lock(&ac->cmd_lock); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + if (!port->buf) { + mutex_unlock(&ac->cmd_lock); + return 0; + } + cnt = port->max_buf_cnt - 1; + + if (cnt >= 0) { + rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir); + if (rc < 0) + pr_aud_err("%s CMD 
Memory_unmap_regions failed\n", + __func__); + } + + if (port->buf[0].data) { + pr_debug("%s:data[%p]phys[%p][%p] cnt[%d]\n", + __func__, + (void *)port->buf[0].data, + (void *)port->buf[0].phys, + (void *)&port->buf[0].phys, cnt); + dma_free_coherent(NULL, + port->buf[0].size * port->max_buf_cnt, + port->buf[0].data, + port->buf[0].phys); + } + while (cnt >= 0) { + port->buf[cnt].data = NULL; + port->buf[cnt].phys = 0; + cnt--; + } + port->max_buf_cnt = 0; + kfree(port->buf); + port->buf = NULL; + } + mutex_unlock(&ac->cmd_lock); + return 0; +} + +void q6asm_audio_client_free(struct audio_client *ac) +{ + int loopcnt; + struct audio_port_data *port; + if (!ac || !ac->session) + return; + pr_debug("%s: Session id %d\n", __func__, ac->session); + if (ac->io_mode == SYNC_IO_MODE) { + for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { + port = &ac->port[loopcnt]; + if (!port->buf) + continue; + pr_debug("%s:loopcnt = %d\n", __func__, loopcnt); + q6asm_audio_client_buf_free(loopcnt, ac); + } + } + + apr_deregister(ac->apr); + q6asm_session_free(ac); + + pr_debug("%s: APR De-Register\n", __func__); + if (atomic_read(&this_mmap.ref_cnt) <= 0) { + pr_aud_err("%s: APR Common Port Already Closed\n", __func__); + goto done; + } + + atomic_dec(&this_mmap.ref_cnt); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + apr_deregister(this_mmap.apr); + pr_debug("%s:APR De-Register common port\n", __func__); + } +done: + kfree(ac); + return; +} + +int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode) +{ + if (ac == NULL) { + pr_aud_err("%s APR handle NULL\n", __func__); + return -EINVAL; + } + if ((mode == ASYNC_IO_MODE) || (mode == SYNC_IO_MODE)) { + ac->io_mode = mode; + pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode); + return 0; + } else { + pr_aud_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode); + return -EINVAL; + } +} + +struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv) +{ + struct audio_client *ac; + int n; + int lcnt = 0; + + ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL); + if (!ac) + return NULL; + n = q6asm_session_alloc(ac); + if (n <= 0) + goto fail_session; + ac->session = n; + ac->cb = cb; + ac->priv = priv; + ac->io_mode = SYNC_IO_MODE; + ac->apr = apr_register("ADSP", "ASM", \ + (apr_fn)q6asm_callback,\ + ((ac->session) << 8 | 0x0001),\ + ac); + + if (ac->apr == NULL) { + pr_aud_err("%s Registration with APR failed\n", __func__); + goto fail; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_asm_handle(n, ac->apr); +#endif + pr_debug("%s Registering the common port with APR\n", __func__); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + this_mmap.apr = apr_register("ADSP", "ASM", \ + (apr_fn)q6asm_mmapcallback,\ + 0x0FFFFFFFF, &this_mmap); + if (this_mmap.apr == NULL) { + pr_debug("%s Unable to register \ + APR ASM common port \n", __func__); + goto fail; + } + } + + atomic_inc(&this_mmap.ref_cnt); + init_waitqueue_head(&ac->cmd_wait); + init_waitqueue_head(&ac->time_wait); + atomic_set(&ac->time_flag, 1); + mutex_init(&ac->cmd_lock); + for (lcnt = 0; lcnt <= OUT; lcnt++) { + mutex_init(&ac->port[lcnt].lock); + spin_lock_init(&ac->port[lcnt].dsp_lock); + } + atomic_set(&ac->cmd_state, 0); + + pr_debug("%s: session[%d]\n", __func__, ac->session); + + return ac; +fail: + q6asm_audio_client_free(ac); + return NULL; +fail_session: + kfree(ac); + return NULL; +} + +int q6asm_audio_client_buf_alloc(unsigned int dir, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt) +{ + int cnt = 0; + int rc = 0; + struct audio_buffer *buf; + + if (!(ac) 
|| ((dir != IN) && (dir != OUT))) + return -EINVAL; + + pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, + bufsz, bufcnt); + + if (ac->session <= 0 || ac->session > 8) + goto fail; + + if (ac->io_mode == SYNC_IO_MODE) { + if (ac->port[dir].buf) { + pr_debug("%s: buffer already allocated\n", __func__); + return 0; + } + mutex_lock(&ac->cmd_lock); + buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), + GFP_KERNEL); + + if (!buf) { + mutex_unlock(&ac->cmd_lock); + goto fail; + } + + ac->port[dir].buf = buf; + + while (cnt < bufcnt) { + if (bufsz > 0) { + buf[cnt].data = dma_alloc_coherent(NULL, bufsz, + &buf[cnt].phys, + GFP_KERNEL); + if (!buf[cnt].data) { + pr_aud_err("%s Buf alloc failed for" + " size=%d\n", __func__, + bufsz); + mutex_unlock(&ac->cmd_lock); + goto fail; + } + buf[cnt].used = 1; + buf[cnt].size = bufsz; + buf[cnt].actual_size = bufsz; + pr_debug("%s data[%p]phys[%p][%p]\n", __func__, + (void *)buf[cnt].data, + (void *)buf[cnt].phys, + (void *)&buf[cnt].phys); + } + cnt++; + } + ac->port[dir].max_buf_cnt = cnt; + + mutex_unlock(&ac->cmd_lock); + rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt); + if (rc < 0) { + pr_aud_err("%s:CMD Memory_map_regions failed\n", __func__); + goto fail; + } + } + return 0; +fail: + q6asm_audio_client_buf_free(dir, ac); + return -EINVAL; +} + +int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, + struct audio_client *ac, + unsigned int bufsz, + unsigned int bufcnt) +{ + int cnt = 0; + int rc = 0; + struct audio_buffer *buf; + + if (!(ac) || ((dir != IN) && (dir != OUT))) + return -EINVAL; + + pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", + __func__, ac->session, + bufsz, bufcnt); + + if (ac->session <= 0 || ac->session > 8) + goto fail; + + if (ac->io_mode == SYNC_IO_MODE) { + if (ac->port[dir].buf) { + pr_debug("%s: buffer already allocated\n", __func__); + return 0; + } + mutex_lock(&ac->cmd_lock); + buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt), + GFP_KERNEL); + + if (!buf) { + mutex_unlock(&ac->cmd_lock); + goto fail; + } + + ac->port[dir].buf = buf; + + buf[0].data = dma_alloc_coherent(NULL, bufsz * bufcnt, + &buf[0].phys, GFP_KERNEL); + buf[0].used = dir ^ 1; + buf[0].size = bufsz; + buf[0].actual_size = bufsz; + cnt = 1; + while (cnt < bufcnt) { + if (bufsz > 0) { + buf[cnt].data = buf[0].data + (cnt * bufsz); + buf[cnt].phys = buf[0].phys + (cnt * bufsz); + + if (!buf[cnt].data) { + pr_aud_err("%s Buf alloc failed\n", + __func__); + mutex_unlock(&ac->cmd_lock); + goto fail; + } + buf[cnt].used = dir ^ 1; + buf[cnt].size = bufsz; + buf[cnt].actual_size = bufsz; + pr_debug("%s data[%p]phys[%p][%p]\n", __func__, + (void *)buf[cnt].data, + (void *)buf[cnt].phys, + (void *)&buf[cnt].phys); + } + cnt++; + } + ac->port[dir].max_buf_cnt = cnt; + + mutex_unlock(&ac->cmd_lock); + rc = q6asm_memory_map(ac, buf[0].phys, dir, bufsz, cnt); + if (rc < 0) { + pr_aud_err("%s:CMD Memory_map_regions failed\n", __func__); + goto fail; + } + } + return 0; +fail: + q6asm_audio_client_buf_free_contiguous(dir, ac); + return -EINVAL; +} + +static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv) +{ + uint32_t token; + uint32_t *payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s: Reset event is received: %d %d apr[%p]\n", + __func__, + data->reset_event, + data->reset_proc, + this_mmap.apr); + apr_reset(this_mmap.apr); + this_mmap.apr = NULL; + atomic_set(&this_mmap.cmd_state, 0); + return 0; + } + + pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x]" + 
"token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, + payload[0], payload[1], data->opcode, data->token, + data->payload_size, data->src_port, data->dest_port); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + token = data->token; + switch (payload[0]) { + case ASM_SESSION_CMD_MEMORY_MAP: + case ASM_SESSION_CMD_MEMORY_UNMAP: + case ASM_SESSION_CMD_MEMORY_MAP_REGIONS: + case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS: + pr_debug("%s:command[0x%x]success [0x%x]\n", + __func__, payload[0], payload[1]); + if (atomic_read(&this_mmap.cmd_state)) { + atomic_set(&this_mmap.cmd_state, 0); + wake_up(&this_mmap.cmd_wait); + } + break; + default: + pr_debug("%s:command[0x%x] not expecting rsp\n", + __func__, payload[0]); + break; + } + } + return 0; +} + + +static int32_t q6asm_callback(struct apr_client_data *data, void *priv) +{ + int i = 0; + struct audio_client *ac = (struct audio_client *)priv; + uint32_t token; + unsigned long dsp_flags; + uint32_t *payload; + + + if ((ac == NULL) || (data == NULL)) { + pr_aud_err("ac or priv NULL\n"); + return -EINVAL; + } + if (ac->session <= 0 || ac->session > 8) { + pr_aud_err("%s:Session ID is invalid, session = %d\n", __func__, + ac->session); + return -EINVAL; + } + + payload = data->payload; + + if (data->opcode == RESET_EVENTS) { + pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n", + data->reset_event, data->reset_proc, ac->apr); + apr_reset(ac->apr); + return 0; + } + + pr_debug("%s: session[%d]opcode[0x%x] \ + token[0x%x]payload_s[%d] src[%d] dest[%d]\n", __func__, + ac->session, data->opcode, + data->token, data->payload_size, data->src_port, + data->dest_port); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + token = data->token; + switch (payload[0]) { + case ASM_STREAM_CMD_SET_PP_PARAMS: +#ifdef CONFIG_MSM8X60_RTAC + if (rtac_make_asm_callback(ac->session, payload, + data->payload_size)) + break; +#endif + case ASM_SESSION_CMD_PAUSE: + case ASM_DATA_CMD_EOS: + case ASM_STREAM_CMD_CLOSE: + case ASM_STREAM_CMD_FLUSH: + case ASM_SESSION_CMD_RUN: + case ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS: + pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]); + if (token != ac->session) { + pr_aud_err("%s:Invalid session[%d] rxed expected[%d]", + __func__, token, ac->session); + return -EINVAL; + } + case ASM_STREAM_CMD_OPEN_READ: + case ASM_STREAM_CMD_OPEN_WRITE: + case ASM_STREAM_CMD_OPEN_READWRITE: + case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE: + case ASM_STREAM_CMD_SET_ENCDEC_PARAM: + if (atomic_read(&ac->cmd_state)) { + atomic_set(&ac->cmd_state, 0); + wake_up(&ac->cmd_wait); + } + if (ac->cb) + ac->cb(data->opcode, data->token, + (uint32_t *)data->payload, ac->priv); + break; + default: + pr_debug("%s:command[0x%x] not expecting rsp\n", + __func__, payload[0]); + break; + } + return 0; + } + + switch (data->opcode) { + case ASM_DATA_EVENT_WRITE_DONE:{ + struct audio_port_data *port = &ac->port[IN]; + pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]", + __func__, payload[0], payload[1], + data->token); + if (ac->io_mode == SYNC_IO_MODE) { + if (port->buf == NULL) { + pr_aud_err("%s: Unexpected Write Done\n", + __func__); + return -EINVAL; + } + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + if (port->buf[data->token].phys != + payload[0]) { + pr_aud_err("Buf expected[%p]rxed[%p]\n",\ + (void *)port->buf[data->token].phys,\ + (void *)payload[0]); + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + return -EINVAL; + } + token = data->token; + port->buf[token].used = 1; + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); 
+ for (i = 0; i < port->max_buf_cnt; i++) + pr_debug("%d ", port->buf[i].used); + + } + break; + } +#ifdef CONFIG_MSM8X60_RTAC + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + rtac_make_asm_callback(ac->session, payload, + data->payload_size); + break; +#endif + case ASM_DATA_EVENT_READ_DONE:{ + + struct audio_port_data *port = &ac->port[OUT]; + + pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n", + __func__, payload[READDONE_IDX_STATUS], + payload[READDONE_IDX_BUFFER], + payload[READDONE_IDX_SIZE], + payload[READDONE_IDX_OFFSET]); + pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d flags=%d id=%d num=%d\n", + __func__, payload[READDONE_IDX_MSW_TS], + payload[READDONE_IDX_LSW_TS], + payload[READDONE_IDX_FLAGS], + payload[READDONE_IDX_ID], + payload[READDONE_IDX_NUMFRAMES]); + + if (ac->io_mode == SYNC_IO_MODE) { + if (port->buf == NULL) { + pr_aud_err("%s: Unexpected Write Done\n", __func__); + return -EINVAL; + } + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + token = data->token; + port->buf[token].used = 0; + if (port->buf[token].phys != + payload[READDONE_IDX_BUFFER]) { + pr_aud_err("Buf expected[%p]rxed[%p]\n",\ + (void *)port->buf[token].phys,\ + (void *)payload[READDONE_IDX_BUFFER]); + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + break; + } + port->buf[token].actual_size = + payload[READDONE_IDX_SIZE]; + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); + } + break; + } + case ASM_DATA_EVENT_EOS: + case ASM_DATA_CMDRSP_EOS: + pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n", + __func__, data->opcode); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("ASM_SESSION_EVENT_TX_OVERFLOW\n"); + break; + case ASM_SESSION_CMDRSP_GET_SESSION_TIME: + pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, " + "payload[0] = %d, payload[1] = %d, " + "payload[2] = %d\n", __func__, + payload[0], payload[1], payload[2]); + ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) | + payload[2]); + if (atomic_read(&ac->time_flag)) { + atomic_set(&ac->time_flag, 0); + wake_up(&ac->time_wait); + } + break; + + } + if (ac->cb) + ac->cb(data->opcode, data->token, + data->payload, ac->priv); + + return 0; +} + +void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size, + uint32_t *index) +{ + void *data; + unsigned char idx; + struct audio_port_data *port; + + if (!ac || ((dir != IN) && (dir != OUT))) + return NULL; + + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + + mutex_lock(&port->lock); + idx = port->cpu_buf; + if (port->buf == NULL) { + pr_debug("%s:Buffer pointer null\n", __func__); + return NULL; + } + /* dir 0: used = 0 means buf in use + dir 1: used = 1 means buf in use */ + if (port->buf[idx].used == dir) { + /* To make it more robust, we could loop and get the + next avail buf, its risky though */ + pr_debug("%s:Next buf idx[0x%x] not available,\ + dir[%d]\n", __func__, idx, dir); + mutex_unlock(&port->lock); + return NULL; + } + *size = port->buf[idx].actual_size; + *index = port->cpu_buf; + data = port->buf[idx].data; + pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n", + __func__, + ac->session, + port->cpu_buf, + data, *size); + /* By default increase the cpu_buf cnt + user accesses this function,increase cpu + buf(to avoid another api)*/ + port->buf[idx].used = dir; + port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1)); + mutex_unlock(&port->lock); + return data; + } + return NULL; +} + +int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac) +{ + int ret = -1; 
+ struct audio_port_data *port; + uint32_t idx; + + if (!ac || (dir != OUT)) + return ret; + + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[dir]; + + mutex_lock(&port->lock); + idx = port->dsp_buf; + + if (port->buf[idx].used == (dir ^ 1)) { + /* To make it more robust, we could loop and get the + next avail buf, its risky though */ + pr_aud_err("Next buf idx[0x%x] not available, dir[%d]\n", + idx, dir); + mutex_unlock(&port->lock); + return ret; + } + pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__, + ac->session, port->dsp_buf, port->cpu_buf); + ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1); + mutex_unlock(&port->lock); + } + return ret; +} + +static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg) +{ + pr_debug("%s:session=%d pkt size=%d cmd_flg=%d\n", __func__, pkt_size, + cmd_flg, ac->session); + mutex_lock(&ac->cmd_lock); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(sizeof(struct apr_hdr)),\ + APR_PKT_VER); + hdr->src_svc = ((struct apr_svc *)ac->apr)->id; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->dest_svc = APR_SVC_ASM; + hdr->dest_domain = APR_DOMAIN_ADSP; + hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; + hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; + if (cmd_flg) { + hdr->token = ac->session; + atomic_set(&ac->cmd_state, 1); + } + hdr->pkt_size = pkt_size; + mutex_unlock(&ac->cmd_lock); + return; +} + +static void q6asm_add_mmaphdr(struct apr_hdr *hdr, uint32_t pkt_size, + uint32_t cmd_flg) +{ + pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + hdr->src_port = 0; + hdr->dest_port = 0; + if (cmd_flg) { + hdr->token = 0; + atomic_set(&this_mmap.cmd_state, 1); + } + hdr->pkt_size = pkt_size; + return; +} + +int q6asm_open_read(struct audio_client *ac, + uint32_t format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_read open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("%s:session[%d]", __func__, ac->session); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ; + /* Stream prio : High, provide meta info with encoded frames */ + open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX; + open.pre_proc_top = DEFAULT_POPP_TOPOLOGY; + + switch (format) { + case FORMAT_LINEAR_PCM: + open.uMode = STREAM_PRIORITY_HIGH; + open.format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = MPEG4_AAC; + break; + case FORMAT_V13K: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = V13K_FS; + break; + case FORMAT_EVRC: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = EVRC_FS; + break; + case FORMAT_AMRNB: + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH; + open.format = AMRNB_FS; + break; + default: + pr_aud_err("Invalid format[%d]\n", format); + goto fail_cmd; + } + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("open failed op[0x%x]rc[%d]\n", \ + open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout. 
waited for OPEN_WRITE rc[%d]\n", __func__, + rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_open_write(struct audio_client *ac, uint32_t format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_write open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session, + format); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + + open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE; + open.uMode = STREAM_PRIORITY_HIGH; + /* source endpoint : matrix */ + open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX; + open.stream_handle = 0x00; + open.post_proc_top = DEFAULT_POPP_TOPOLOGY; + + switch (format) { + case FORMAT_LINEAR_PCM: + open.format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.format = MPEG4_AAC; + break; + case FORMAT_WMA_V9: + open.format = WMA_V9; + break; + case FORMAT_WMA_V10PRO: + open.format = WMA_V10PRO; + break; + default: + pr_aud_err("%s: Invalid format[%d]\n", __func__, format); + goto fail_cmd; + } + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("%s: open failed op[0x%x]rc[%d]\n", \ + __func__, open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout. waited for OPEN_WRITR rc[%d]\n", __func__, + rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_open_read_write(struct audio_client *ac, + uint32_t rd_format, + uint32_t wr_format) +{ + int rc = 0x00; + struct asm_stream_cmd_open_read_write open; + + if ((ac == NULL) || (ac->apr == NULL)) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d]", __func__, ac->session); + pr_debug("wr_format[0x%x]rd_format[0x%x]", + wr_format, rd_format); + + q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE); + open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE; + + open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_NORMAL; + /* source endpoint : matrix */ + open.post_proc_top = DEFAULT_POPP_TOPOLOGY; + switch (wr_format) { + case FORMAT_LINEAR_PCM: + open.write_format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.write_format = MPEG4_AAC; + break; + case FORMAT_WMA_V9: + open.write_format = WMA_V9; + break; + case FORMAT_WMA_V10PRO: + open.write_format = WMA_V10PRO; + break; + default: + pr_aud_err("Invalid format[%d]\n", wr_format); + goto fail_cmd; + } + + switch (rd_format) { + case FORMAT_LINEAR_PCM: + open.read_format = LINEAR_PCM; + break; + case FORMAT_MPEG4_AAC: + open.read_format = MPEG4_AAC; + break; + case FORMAT_V13K: + open.read_format = V13K_FS; + break; + case FORMAT_EVRC: + open.read_format = EVRC_FS; + break; + case FORMAT_AMRNB: + open.read_format = AMRNB_FS; + break; + default: + pr_aud_err("Invalid format[%d]\n", rd_format); + goto fail_cmd; + } + pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__, + open.read_format, open.write_format); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &open); + if (rc < 0) { + pr_aud_err("open failed op[0x%x]rc[%d]\n", \ + open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for OPEN_WRITR rc[%d]\n", rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_run(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts) +{ + struct asm_stream_cmd_run run; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s session[%d]", __func__, ac->session); + q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE); + + run.hdr.opcode = ASM_SESSION_CMD_RUN; + run.flags = flags; + run.msw_ts = msw_ts; + run.lsw_ts = lsw_ts; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &run); + if (rc < 0) { + pr_aud_err("Commmand run failed[%d]", rc); + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for run success rc[%d]", rc); + goto fail_cmd; + } + + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts) +{ + struct asm_stream_cmd_run run; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("%s:APR handle NULL\n", __func__); + return -EINVAL; + } + pr_debug("session[%d]", ac->session); + q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE); + + run.hdr.opcode = ASM_SESSION_CMD_RUN; + run.flags = flags; + run.msw_ts = msw_ts; + run.lsw_ts = lsw_ts; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &run); + if (rc < 0) { + pr_aud_err("%s:Commmand run failed[%d]", __func__, rc); + return -EINVAL; + } + return 0; +} + + +int q6asm_enc_cfg_blk_aac(struct audio_client *ac, + uint32_t frames_per_buf, + uint32_t sample_rate, uint32_t channels, + uint32_t bit_rate, uint32_t mode, uint32_t format) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d]" + "format[%d]", __func__, ac->session, frames_per_buf, + sample_rate, channels, bit_rate, mode, format); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = MPEG4_AAC; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_aac_read_cfg); + enc_cfg.enc_blk.cfg.aac.bitrate = bit_rate; + enc_cfg.enc_blk.cfg.aac.enc_mode = mode; + enc_cfg.enc_blk.cfg.aac.format = format; + enc_cfg.enc_blk.cfg.aac.ch_cfg = channels; + enc_cfg.enc_blk.cfg.aac.sample_rate = sample_rate; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + + int rc = 0; + + pr_debug("%s: Session %d\n", __func__, ac->session); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + enc_cfg.enc_blk.frames_per_buf = 1; + enc_cfg.enc_blk.format_id = LINEAR_PCM; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg); + enc_cfg.enc_blk.cfg.pcm.ch_cfg = channels; + enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16; + enc_cfg.enc_blk.cfg.pcm.sample_rate = rate; + enc_cfg.enc_blk.cfg.pcm.is_signed = 1; + enc_cfg.enc_blk.cfg.pcm.interleaved = 1; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd open failed\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enable_sbrps(struct audio_client *ac, + uint32_t sbr_ps_enable) +{ + struct asm_stream_cmd_encdec_sbr sbrps; + + int rc = 0; + + pr_debug("%s: Session %d\n", __func__, ac->session); + + q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE); + + sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + sbrps.param_id = ASM_ENABLE_SBR_PS; + sbrps.param_size = sizeof(struct asm_sbr_ps); + sbrps.sbr_ps.enable = sbr_ps_enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps); + if (rc < 0) { + pr_aud_err("Command opcode[0x%x]paramid[0x%x] failed\n", + ASM_STREAM_CMD_SET_ENCDEC_PARAM, + ASM_ENABLE_SBR_PS); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout opcode[0x%x] ", sbrps.hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t reduced_rate_level, uint16_t rate_modulation_cmd) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \ + reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]", __func__, + ac->session, frames_per_buf, min_rate, max_rate, + reduced_rate_level, rate_modulation_cmd); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = V13K_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_qcelp13_read_cfg); + enc_cfg.enc_blk.cfg.qcelp13.min_rate = min_rate; + enc_cfg.enc_blk.cfg.qcelp13.max_rate = max_rate; + enc_cfg.enc_blk.cfg.qcelp13.reduced_rate_level = reduced_rate_level; + enc_cfg.enc_blk.cfg.qcelp13.rate_modulation_cmd = rate_modulation_cmd; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t min_rate, uint16_t max_rate, + uint16_t rate_modulation_cmd) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] \ + rate_modulation_cmd[0x%4x]", __func__, ac->session, + frames_per_buf, min_rate, max_rate, rate_modulation_cmd); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = EVRC_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_evrc_read_cfg); + enc_cfg.enc_blk.cfg.evrc.min_rate = min_rate; + enc_cfg.enc_blk.cfg.evrc.max_rate = max_rate; + enc_cfg.enc_blk.cfg.evrc.rate_modulation_cmd = rate_modulation_cmd; + enc_cfg.enc_blk.cfg.evrc.reserved = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf, + uint16_t band_mode, uint16_t dtx_enable) +{ + struct asm_stream_cmd_encdec_cfg_blk enc_cfg; + int rc = 0; + + pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]", + __func__, ac->session, frames_per_buf, band_mode, dtx_enable); + + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + + enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID; + enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk); + + enc_cfg.enc_blk.frames_per_buf = frames_per_buf; + enc_cfg.enc_blk.format_id = AMRNB_FS; + enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrnb_read_cfg); + enc_cfg.enc_blk.cfg.amrnb.mode = band_mode; + enc_cfg.enc_blk.cfg.amrnb.dtx_mode = dtx_enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_aud_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for FORMAT_UPDATE\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_pcm(struct audio_client *ac, + uint32_t rate, uint32_t channels) +{ + struct asm_stream_media_format_update fmt; + int rc = 0; + + pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate, + channels); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = LINEAR_PCM; + fmt.cfg_size = sizeof(struct asm_pcm_cfg); + fmt.write_cfg.pcm_cfg.ch_cfg = channels; + fmt.write_cfg.pcm_cfg.bits_per_sample = 16; + fmt.write_cfg.pcm_cfg.sample_rate = rate; + fmt.write_cfg.pcm_cfg.is_signed = 1; + fmt.write_cfg.pcm_cfg.interleaved = 1; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_aac(struct audio_client *ac, + struct asm_aac_cfg *cfg) +{ + struct asm_stream_media_format_update fmt; + int rc = 0; + + pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, + cfg->sample_rate, cfg->ch_cfg); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = MPEG4_AAC; + fmt.cfg_size = sizeof(struct asm_aac_cfg); + fmt.write_cfg.aac_cfg.format = cfg->format; + fmt.write_cfg.aac_cfg.aot = cfg->aot; + fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config; + fmt.write_cfg.aac_cfg.section_data_resilience = + cfg->section_data_resilience; + fmt.write_cfg.aac_cfg.scalefactor_data_resilience = + cfg->scalefactor_data_resilience; + fmt.write_cfg.aac_cfg.spectral_data_resilience = + cfg->spectral_data_resilience; + fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg; + fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate; + pr_aud_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n", + __func__, fmt.format, fmt.cfg_size, + fmt.write_cfg.aac_cfg.format, + fmt.write_cfg.aac_cfg.aot, + fmt.write_cfg.aac_cfg.ch_cfg, + fmt.write_cfg.aac_cfg.sample_rate); + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_wma(struct audio_client *ac, + void *cfg) +{ + struct asm_stream_media_format_update fmt; + struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg; + int rc = 0; + + pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d],\ + balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n", + ac->session, wma_cfg->format_tag, wma_cfg->sample_rate, + wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec, + wma_cfg->block_align, wma_cfg->valid_bits_per_sample, + wma_cfg->ch_mask, wma_cfg->encode_opt); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = WMA_V9; + fmt.cfg_size = sizeof(struct asm_wma_cfg); + fmt.write_cfg.wma_cfg.format_tag = wma_cfg->format_tag; + fmt.write_cfg.wma_cfg.ch_cfg = wma_cfg->ch_cfg; + fmt.write_cfg.wma_cfg.sample_rate = wma_cfg->sample_rate; + fmt.write_cfg.wma_cfg.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec; + fmt.write_cfg.wma_cfg.block_align = wma_cfg->block_align; + fmt.write_cfg.wma_cfg.valid_bits_per_sample = + wma_cfg->valid_bits_per_sample; + fmt.write_cfg.wma_cfg.ch_mask = wma_cfg->ch_mask; + fmt.write_cfg.wma_cfg.encode_opt = wma_cfg->encode_opt; + fmt.write_cfg.wma_cfg.adv_encode_opt = 0; + fmt.write_cfg.wma_cfg.adv_encode_opt2 = 0; + fmt.write_cfg.wma_cfg.drc_peak_ref = 0; + fmt.write_cfg.wma_cfg.drc_peak_target = 0; + fmt.write_cfg.wma_cfg.drc_ave_ref = 0; + fmt.write_cfg.wma_cfg.drc_ave_target = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. 
waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_media_format_block_wmapro(struct audio_client *ac, + void *cfg) +{ + struct asm_stream_media_format_update fmt; + struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg; + int rc = 0; + + pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d]," + "balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x],\ + adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n", + ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate, + wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec, + wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample, + wmapro_cfg->ch_mask, wmapro_cfg->encode_opt, + wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2); + + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE; + + fmt.format = WMA_V10PRO; + fmt.cfg_size = sizeof(struct asm_wmapro_cfg); + fmt.write_cfg.wmapro_cfg.format_tag = wmapro_cfg->format_tag; + fmt.write_cfg.wmapro_cfg.ch_cfg = wmapro_cfg->ch_cfg; + fmt.write_cfg.wmapro_cfg.sample_rate = wmapro_cfg->sample_rate; + fmt.write_cfg.wmapro_cfg.avg_bytes_per_sec = + wmapro_cfg->avg_bytes_per_sec; + fmt.write_cfg.wmapro_cfg.block_align = wmapro_cfg->block_align; + fmt.write_cfg.wmapro_cfg.valid_bits_per_sample = + wmapro_cfg->valid_bits_per_sample; + fmt.write_cfg.wmapro_cfg.ch_mask = wmapro_cfg->ch_mask; + fmt.write_cfg.wmapro_cfg.encode_opt = wmapro_cfg->encode_opt; + fmt.write_cfg.wmapro_cfg.adv_encode_opt = wmapro_cfg->adv_encode_opt; + fmt.write_cfg.wmapro_cfg.adv_encode_opt2 = wmapro_cfg->adv_encode_opt2; + fmt.write_cfg.wmapro_cfg.drc_peak_ref = 0; + fmt.write_cfg.wmapro_cfg.drc_peak_target = 0; + fmt.write_cfg.wmapro_cfg.drc_ave_ref = 0; + fmt.write_cfg.wmapro_cfg.drc_ave_target = 0; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_aud_err("%s:Comamnd open failed\n", __func__); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_map mem_map; + int rc = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + mem_map.hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP; + + mem_map.buf_add = buf_add; + mem_map.buf_size = bufsz * bufcnt; + mem_map.mempool_id = 0; /* EBI */ + mem_map.reserved = 0; + + q6asm_add_mmaphdr(&mem_map.hdr, + sizeof(struct asm_stream_cmd_memory_map), TRUE); + + pr_debug("buf add[%x] buf_add_parameter[%x]\n", + mem_map.buf_add, buf_add); + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_map); + if (rc < 0) { + pr_aud_err("mem_map op[0x%x]rc[%d]\n", + mem_map.hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + return rc; +} + +int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir) +{ + struct asm_stream_cmd_memory_unmap mem_unmap; + int rc = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + q6asm_add_mmaphdr(&mem_unmap.hdr, + sizeof(struct asm_stream_cmd_memory_unmap), TRUE); + mem_unmap.hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP; + mem_unmap.buf_add = buf_add; + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap); + if (rc < 0) { + pr_aud_err("mem_unmap op[0x%x]rc[%d]\n", + mem_unmap.hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ); + if (!rc) { + pr_aud_err("timeout. waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + return rc; +} + +int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_lrchannel_gain_params *lrgain = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_lrchannel_gain_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_lrchannel_gain_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = L_R_CHANNEL_GAIN_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_lrchannel_gain_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + lrgain = (struct asm_lrchannel_gain_params *)payload; + + lrgain->left_gain = left_gain; + lrgain->right_gain = right_gain; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +static int q6asm_memory_map_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_map_regions *mmap_regions = NULL; + struct asm_memory_map_regions *mregions = NULL; + struct audio_port_data *port = NULL; + struct audio_buffer *ab = NULL; + void *mmap_region_cmd = NULL; + void *payload = NULL; + int rc = 0; + int i = 0; + int cmd_size = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + cmd_size = sizeof(struct asm_stream_cmd_memory_map_regions) + + sizeof(struct asm_memory_map_regions) * bufcnt; + + mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (mmap_region_cmd == NULL) { + pr_aud_info("mmap_region_cmd == NULL\n"); + return -EINVAL; + } + mmap_regions = (struct asm_stream_cmd_memory_map_regions *) + mmap_region_cmd; + 
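+	/*
+	 * Note (added for clarity): the single allocation above holds the
+	 * map-regions command header immediately followed by one
+	 * asm_memory_map_regions entry per buffer (cmd_size reserves both);
+	 * the loop further down fills each entry with that buffer's physical
+	 * address and size before the packet is sent over APR.
+	 */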
q6asm_add_mmaphdr(&mmap_regions->hdr, cmd_size, TRUE); + mmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS; + mmap_regions->mempool_id = 0; + mmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("map_regions->nregions = %d\n", mmap_regions->nregions); + payload = ((u8 *) mmap_region_cmd + + sizeof(struct asm_stream_cmd_memory_map_regions)); + mregions = (struct asm_memory_map_regions *)payload; + + port = &ac->port[dir]; + for (i = 0; i < bufcnt; i++) { + ab = &port->buf[i]; + mregions->phys = ab->phys; + mregions->buf_size = ab->size; + ++mregions; + } + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) mmap_region_cmd); + if (rc < 0) { + pr_aud_err("mmap_regions op[0x%x]rc[%d]\n", + mmap_regions->hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. waited for memory_map\n"); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(mmap_region_cmd); + return rc; +} + +static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir, + uint32_t bufsz, uint32_t bufcnt) +{ + struct asm_stream_cmd_memory_unmap_regions *unmap_regions = NULL; + struct asm_memory_unmap_regions *mregions = NULL; + struct audio_port_data *port = NULL; + struct audio_buffer *ab = NULL; + void *unmap_region_cmd = NULL; + void *payload = NULL; + int rc = 0; + int i = 0; + int cmd_size = 0; + + if (!ac || ac->apr == NULL || this_mmap.apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: Session[%d]\n", __func__, ac->session); + + cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) + + sizeof(struct asm_memory_unmap_regions) * bufcnt; + + unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); + if (unmap_region_cmd == NULL) { + pr_aud_info("unmap_region_cmd == NULL\n"); + return -EINVAL; + } + unmap_regions = (struct asm_stream_cmd_memory_unmap_regions *) + unmap_region_cmd; + q6asm_add_mmaphdr(&unmap_regions->hdr, cmd_size, TRUE); + unmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS; + unmap_regions->nregions = bufcnt & 0x00ff; + pr_debug("unmap_regions->nregions = %d\n", unmap_regions->nregions); + payload = ((u8 *) unmap_region_cmd + + sizeof(struct asm_stream_cmd_memory_unmap_regions)); + mregions = (struct asm_memory_unmap_regions *)payload; + port = &ac->port[dir]; + for (i = 0; i < bufcnt; i++) { + ab = &port->buf[i]; + mregions->phys = ab->phys; + ++mregions; + } + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) unmap_region_cmd); + if (rc < 0) { + pr_aud_err("mmap_regions op[0x%x]rc[%d]\n", + unmap_regions->hdr.opcode, rc); + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for memory_unmap\n"); + goto fail_cmd; + } + rc = 0; + +fail_cmd: + kfree(unmap_region_cmd); + return rc; +} + +int q6asm_set_mute(struct audio_client *ac, int muteflag) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_mute_params *mute = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_mute_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_mute_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = MUTE_CONFIG_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_mute_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + mute = (struct asm_mute_params *)payload; + + mute->muteflag = muteflag; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Mute Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending mute command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_set_volume(struct audio_client *ac, int volume) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_master_gain_params *mgain = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_master_gain_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_master_gain_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = MASTER_GAIN_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_master_gain_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + mgain = (struct asm_master_gain_params *)payload; + + mgain->master_gain = volume; + mgain->padding = 0x00; + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_set_softpause(struct audio_client *ac, + struct asm_softpause_params *pause_param) +{ + void *vol_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_softpause_params *params = NULL; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_softpause_params); + vol_cmd = kzalloc(sz, GFP_KERNEL); + 
if (vol_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + return rc; + } + cmd = (struct asm_pp_params_command *)vol_cmd; + q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_softpause_params); + cmd->params.module_id = VOLUME_CONTROL_MODULE_ID; + cmd->params.param_id = SOFT_PAUSE_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_softpause_params); + cmd->params.reserved = 0; + + payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command)); + params = (struct asm_softpause_params *)payload; + + params->enable = pause_param->enable; + params->period = pause_param->period; + params->step = pause_param->step; + params->rampingcurve = pause_param->rampingcurve; + pr_debug("%s: soft Pause Command: enable = %d, period = %d," + "step = %d, curve = %d\n", __func__, params->enable, + params->period, params->step, params->rampingcurve); + rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd); + if (rc < 0) { + pr_aud_err("%s: Volume Command(soft_pause) failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending volume command(soft_pause)" + "to apr\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(vol_cmd); + return rc; +} + +int q6asm_equalizer(struct audio_client *ac, void *eq) +{ + void *eq_cmd = NULL; + void *payload = NULL; + struct asm_pp_params_command *cmd = NULL; + struct asm_equalizer_params *equalizer = NULL; + struct msm_audio_eq_stream_config *eq_params = NULL; + int i = 0; + int sz = 0; + int rc = 0; + + sz = sizeof(struct asm_pp_params_command) + + + sizeof(struct asm_equalizer_params); + eq_cmd = kzalloc(sz, GFP_KERNEL); + if (eq_cmd == NULL) { + pr_aud_err("%s[%d]: Mem alloc failed\n", __func__, ac->session); + rc = -EINVAL; + goto fail_cmd; + } + eq_params = (struct msm_audio_eq_stream_config *) eq; + cmd = (struct asm_pp_params_command *)eq_cmd; + q6asm_add_hdr(ac, &cmd->hdr, sz, TRUE); + cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS; + cmd->payload = NULL; + cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + + sizeof(struct asm_equalizer_params); + cmd->params.module_id = EQUALIZER_MODULE_ID; + cmd->params.param_id = EQUALIZER_PARAM_ID; + cmd->params.param_size = sizeof(struct asm_equalizer_params); + cmd->params.reserved = 0; + payload = (u8 *)(eq_cmd + sizeof(struct asm_pp_params_command)); + equalizer = (struct asm_equalizer_params *)payload; + + equalizer->enable = eq_params->enable; + equalizer->num_bands = eq_params->num_bands; + pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable, + eq_params->num_bands); + for (i = 0; i < eq_params->num_bands; i++) { + equalizer->eq_bands[i].band_idx = + eq_params->eq_bands[i].band_idx; + equalizer->eq_bands[i].filter_type = + eq_params->eq_bands[i].filter_type; + equalizer->eq_bands[i].center_freq_hz = + eq_params->eq_bands[i].center_freq_hz; + equalizer->eq_bands[i].filter_gain = + eq_params->eq_bands[i].filter_gain; + equalizer->eq_bands[i].q_factor = + eq_params->eq_bands[i].q_factor; + pr_debug("%s: filter_type:%u bandnum:%d\n", __func__, + eq_params->eq_bands[i].filter_type, i); + pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__, + eq_params->eq_bands[i].center_freq_hz, i); + pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__, + 
eq_params->eq_bands[i].filter_gain, i); + pr_debug("%s: q_factor:%d bandnum:%d\n", __func__, + eq_params->eq_bands[i].q_factor, i); + } + rc = apr_send_pkt(ac->apr, (uint32_t *) eq_cmd); + if (rc < 0) { + pr_aud_err("%s: Equalizer Command failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in sending equalizer command to apr\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = 0; +fail_cmd: + kfree(eq_cmd); + return rc; +} + +int q6asm_read(struct audio_client *ac) +{ + struct asm_stream_cmd_read read; + struct audio_buffer *ab; + int dsp_buf; + struct audio_port_data *port; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[OUT]; + + q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE); + + mutex_lock(&port->lock); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", + __func__, + ac->session, + dsp_buf, + (void *)port->buf[dsp_buf].data, + port->cpu_buf, + (void *)port->buf[port->cpu_buf].phys); + + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = ab->phys; + read.buf_size = ab->size; + read.uid = port->dsp_buf; + read.hdr.token = port->dsp_buf; + + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + mutex_unlock(&port->lock); + pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, + read.buf_add, + read.hdr.token, + read.uid); + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_aud_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; + } +fail_cmd: + return -EINVAL; +} + +int q6asm_read_nolock(struct audio_client *ac) +{ + struct asm_stream_cmd_read read; + struct audio_buffer *ab; + int dsp_buf; + struct audio_port_data *port; + int rc; + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[OUT]; + + q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); + + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n", + __func__, + ac->session, + dsp_buf, + (void *)port->buf[dsp_buf].data, + port->cpu_buf, + (void *)port->buf[port->cpu_buf].phys); + + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = ab->phys; + read.buf_size = ab->size; + read.uid = port->dsp_buf; + read.hdr.token = port->dsp_buf; + + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__, + read.buf_add, + read.hdr.token, + read.uid); + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_aud_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; + } +fail_cmd: + return -EINVAL; +} + +static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr, + uint32_t pkt_size, uint32_t cmd_flg) +{ + pr_debug("session=%d pkt size=%d cmd_flg=%d\n", pkt_size, cmd_flg, + ac->session); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(sizeof(struct apr_hdr)),\ + APR_PKT_VER); + hdr->src_svc = ((struct apr_svc *)ac->apr)->id; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->dest_svc = APR_SVC_ASM; + hdr->dest_domain = APR_DOMAIN_ADSP; + hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01; + hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01; + if (cmd_flg) { + 
hdr->token = ac->session; + atomic_set(&ac->cmd_state, 1); + } + hdr->pkt_size = pkt_size; + return; +} + + +int q6asm_async_write(struct audio_client *ac, + struct audio_aio_write_param *param) +{ + int rc = 0; + struct asm_stream_cmd_write write; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE); + + /* Pass physical address as token for AIO scheme */ + write.hdr.token = param->uid; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = param->paddr; + write.avail_bytes = param->len; + write.uid = param->uid; + write.msw_ts = param->msw_ts; + write.lsw_ts = param->lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (param->flags == 0xFF00) + write.uflags = (0x00000000 | (param->flags & 0x800000FF)); + else + write.uflags = (0x80000000 | param->flags); + + pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, + write.buf_add, write.avail_bytes); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__, + write.hdr.opcode, rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_async_read(struct audio_client *ac, + struct audio_aio_read_param *param) +{ + int rc = 0; + struct asm_stream_cmd_read read; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE); + + /* Pass physical address as token for AIO scheme */ + read.hdr.token = param->paddr; + read.hdr.opcode = ASM_DATA_CMD_READ; + read.buf_add = param->paddr; + read.buf_size = param->len; + read.uid = param->uid; + + pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session, + read.buf_add, read.buf_size); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &read); + if (rc < 0) { + pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__, + read.hdr.opcode, rc); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags) +{ + int rc = 0; + struct asm_stream_cmd_write write; + struct audio_port_data *port; + struct audio_buffer *ab; + int dsp_buf = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[IN]; + + q6asm_add_hdr(ac, &write.hdr, sizeof(write), + FALSE); + mutex_lock(&port->lock); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + write.hdr.token = port->dsp_buf; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = ab->phys; + write.avail_bytes = len; + write.uid = port->dsp_buf; + write.msw_ts = msw_ts; + write.lsw_ts = lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (flags == 0xFF00) + write.uflags = (0x00000000 | (flags & 0x800000FF)); + else + write.uflags = (0x80000000 | flags); + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + + pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" + , __func__, + ab->phys, + write.buf_add, + write.hdr.token, + write.uid); + mutex_unlock(&port->lock); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_aud_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); + goto fail_cmd; + } + pr_debug("%s: WRITE SUCCESS\n", __func__); + return 0; + } +fail_cmd: + return -EINVAL; +} + +int q6asm_write_nolock(struct 
audio_client *ac, uint32_t len, uint32_t msw_ts, + uint32_t lsw_ts, uint32_t flags) +{ + int rc = 0; + struct asm_stream_cmd_write write; + struct audio_port_data *port; + struct audio_buffer *ab; + int dsp_buf = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s: session[%d] len=%d", __func__, ac->session, len); + if (ac->io_mode == SYNC_IO_MODE) { + port = &ac->port[IN]; + + q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), + FALSE); + + dsp_buf = port->dsp_buf; + ab = &port->buf[dsp_buf]; + + write.hdr.token = port->dsp_buf; + write.hdr.opcode = ASM_DATA_CMD_WRITE; + write.buf_add = ab->phys; + write.avail_bytes = len; + write.uid = port->dsp_buf; + write.msw_ts = msw_ts; + write.lsw_ts = lsw_ts; + /* Use 0xFF00 for disabling timestamps */ + if (flags == 0xFF00) + write.uflags = (0x00000000 | (flags & 0x800000FF)); + else + write.uflags = (0x80000000 | flags); + port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1); + + pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]" + , __func__, + ab->phys, + write.buf_add, + write.hdr.token, + write.uid); + + rc = apr_send_pkt(ac->apr, (uint32_t *) &write); + if (rc < 0) { + pr_aud_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc); + goto fail_cmd; + } + pr_debug("%s: WRITE SUCCESS\n", __func__); + return 0; + } +fail_cmd: + return -EINVAL; +} + +uint64_t q6asm_get_session_time(struct audio_client *ac) +{ + struct apr_hdr hdr; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); + hdr.opcode = ASM_SESSION_CMD_GET_SESSION_TIME; + atomic_set(&ac->time_flag, 1); + + pr_debug("%s: session[%d]opcode[0x%x]\n", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("Commmand 0x%x failed\n", hdr.opcode); + goto fail_cmd; + } + rc = wait_event_timeout(ac->time_wait, + (atomic_read(&ac->time_flag) == 0), 5*HZ); + if (!rc) { + pr_aud_err("%s: timeout in getting session time from DSP\n", + __func__); + goto fail_cmd; + } + return ac->time_stamp; + +fail_cmd: + return -EINVAL; +} + +int q6asm_cmd(struct audio_client *ac, int cmd) +{ + struct apr_hdr hdr; + int rc; + atomic_t *state; + int cnt = 0; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE); + switch (cmd) { + case CMD_PAUSE: + pr_debug("%s:CMD_PAUSE\n", __func__); + hdr.opcode = ASM_SESSION_CMD_PAUSE; + state = &ac->cmd_state; + break; + case CMD_FLUSH: + pr_debug("%s:CMD_FLUSH\n", __func__); + hdr.opcode = ASM_STREAM_CMD_FLUSH; + state = &ac->cmd_state; + break; + case CMD_EOS: + pr_debug("%s:CMD_EOS\n", __func__); + hdr.opcode = ASM_DATA_CMD_EOS; + atomic_set(&ac->cmd_state, 0); + state = &ac->cmd_state; + break; + case CMD_CLOSE: + pr_debug("%s:CMD_CLOSE\n", __func__); + hdr.opcode = ASM_STREAM_CMD_CLOSE; + state = &ac->cmd_state; + break; + default: + pr_aud_err("Invalid format[%d]\n", cmd); + goto fail_cmd; + } + pr_debug("%s:session[%d]opcode[0x%x] ", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("Commmand 0x%x failed\n", hdr.opcode); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for response opcode[0x%x]\n", + hdr.opcode); + goto fail_cmd; + } + if (cmd == CMD_FLUSH) + q6asm_reset_buf_state(ac); + if (cmd == CMD_CLOSE) { + /* check if DSP return all buffers */ + if (ac->port[IN].buf) { + for (cnt = 0; cnt < ac->port[IN].max_buf_cnt; + cnt++) { + if (ac->port[IN].buf[cnt].used == IN) { + pr_aud_info("Write Buf[%d] not returned\n", + cnt); + } + } + } + if (ac->port[OUT].buf) { + for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) { + if (ac->port[OUT].buf[cnt].used == OUT) { + pr_aud_info("Read Buf[%d] not returned\n", + cnt); + } + } + } + } + return 0; +fail_cmd: + return -EINVAL; +} + +int q6asm_cmd_nowait(struct audio_client *ac, int cmd) +{ + struct apr_hdr hdr; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("%s:APR handle NULL\n", __func__); + return -EINVAL; + } + q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE); + switch (cmd) { + case CMD_PAUSE: + pr_debug("%s:CMD_PAUSE\n", __func__); + hdr.opcode = ASM_SESSION_CMD_PAUSE; + break; + case CMD_EOS: + pr_debug("%s:CMD_EOS\n", __func__); + hdr.opcode = ASM_DATA_CMD_EOS; + break; + default: + pr_aud_err("%s:Invalid format[%d]\n", __func__, cmd); + goto fail_cmd; + } + pr_debug("%s:session[%d]opcode[0x%x] ", __func__, + ac->session, + hdr.opcode); + rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_aud_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +static void q6asm_reset_buf_state(struct audio_client *ac) +{ + int cnt = 0; + int loopcnt = 0; + struct audio_port_data *port = NULL; + + if (ac->io_mode == SYNC_IO_MODE) { + mutex_lock(&ac->cmd_lock); + for (loopcnt = 0; loopcnt <= OUT; loopcnt++) { + port = &ac->port[loopcnt]; + cnt = port->max_buf_cnt - 1; + port->dsp_buf = 0; + port->cpu_buf = 0; + while (cnt >= 0) { + if (!port->buf) + continue; + port->buf[cnt].used = 1; + cnt--; + } + } + mutex_unlock(&ac->cmd_lock); + } +} + +int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable) +{ + struct asm_stream_cmd_reg_tx_overflow_event tx_overflow; + int rc; + + if (!ac || ac->apr == NULL) { + pr_aud_err("APR handle NULL\n"); + return -EINVAL; + } + pr_debug("%s:session[%d]enable[%d]\n", __func__, + ac->session, enable); + q6asm_add_hdr(ac, &tx_overflow.hdr, sizeof(tx_overflow), TRUE); + + tx_overflow.hdr.opcode = \ + ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS; + /* tx overflow event: enable */ + tx_overflow.enable = enable; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &tx_overflow); + if (rc < 0) { + pr_aud_err("tx overflow op[0x%x]rc[%d]\n", \ + tx_overflow.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) == 0), 5*HZ); + if (!rc) { + pr_aud_err("timeout. 
waited for tx overflow\n"); + goto fail_cmd; + } + return 0; +fail_cmd: + return -EINVAL; +} + +#ifdef CONFIG_MSM8X60_RTAC +int q6asm_get_apr_service_id(int session_id) +{ + pr_debug("%s\n", __func__); + + if (session_id < 0) { + pr_aud_err("%s: invalid session_id = %d\n", __func__, session_id); + return -EINVAL; + } + + return ((struct apr_svc *)session[session_id]->apr)->id; +} +#endif + + +static int __init q6asm_init(void) +{ + pr_debug("%s\n", __func__); + init_waitqueue_head(&this_mmap.cmd_wait); + memset(session, 0, sizeof(session)); + return 0; +} + +device_initcall(q6asm_init); diff --git a/arch/arm/mach-msm/qdsp6v3/q6core.c b/arch/arm/mach-msm/qdsp6v3/q6core.c new file mode 100644 index 00000000000..8d739347f3d --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6core.c @@ -0,0 +1,348 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct dentry *dentry; +struct apr_svc_ch_dev *handle; +struct apr_svc *apr_handle_q; +struct apr_svc *apr_handle_m; +struct apr_svc *core_handle_q; +struct apr_client_data clnt_data; +char l_buf[4096]; + +#define TIMEOUT_MS 1000 +int32_t query_adsp_ver; +wait_queue_head_t adsp_version_wait; +uint32_t adsp_version; + +static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv) +{ + struct adsp_get_version *payload; + uint32_t *payload1; + struct adsp_service_info *svc_info; + int i; + + pr_aud_info("core msg: payload len = %d\n", data->payload_size); + switch (data->opcode) { + case APR_BASIC_RSP_RESULT:{ + payload1 = data->payload; + if (payload1[0] == ADSP_CMD_SET_POWER_COLLAPSE_STATE) { + pr_aud_info("Cmd[0x%x] status[0x%x]\n", payload1[0], + payload1[1]); + break; + } else + pr_aud_err("Invalid cmd rsp[0x%x][0x%x]\n", payload1[0], + payload1[1]); + break; + } + case ADSP_GET_VERSION_RSP:{ + if (data->payload_size) { + payload = data->payload; + if (query_adsp_ver == 1) { + query_adsp_ver = 0; + adsp_version = payload->build_id; + wake_up(&adsp_version_wait); + } + svc_info = (struct adsp_service_info *) + ((char *)payload + sizeof(struct adsp_get_version)); + pr_aud_info("----------------------------------------\n"); + pr_aud_info("Build id = %x\n", payload->build_id); + pr_aud_info("Number of services= %x\n", payload->svc_cnt); + pr_aud_info("----------------------------------------\n"); + for (i = 0; i < payload->svc_cnt; i++) { + pr_aud_info("svc-id[%d]\tver[%x.%x]\n", + svc_info[i].svc_id, + (svc_info[i].svc_ver & 0xFFFF0000) >> 16, + (svc_info[i].svc_ver & 0xFFFF)); + } + pr_aud_info("-----------------------------------------\n"); + } else + pr_aud_info("zero payload for ADSP_GET_VERSION_RSP\n"); + break; + } + case RESET_EVENTS:{ + pr_debug("Reset event received in 
Core service"); + apr_reset(core_handle_q); + core_handle_q = NULL; + break; + } + + default: + pr_aud_err("Message id from adsp core svc: %d\n", data->opcode); + break; + } + + return 0; +} + +static int32_t aprv2_debug_fn_q(struct apr_client_data *data, void *priv) +{ + pr_debug("Q6_Payload Length = %d\n", data->payload_size); + if (memcmp(data->payload, l_buf + 20, data->payload_size)) + pr_aud_info("FAIL: %d\n", data->payload_size); + else + pr_aud_info("SUCCESS: %d\n", data->payload_size); + return 0; +} + +static int32_t aprv2_debug_fn_m(struct apr_client_data *data, void *priv) +{ + pr_aud_info("M_Payload Length = %d\n", data->payload_size); + return 0; +} + +static ssize_t apr_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + pr_debug("apr debugfs opened\n"); + return 0; +} + +void *core_open(void) +{ + if (core_handle_q == NULL) { + core_handle_q = apr_register("ADSP", "CORE", + aprv2_core_fn_q, 0xFFFFFFFF, NULL); + } + pr_aud_info("Open_q %p\n", core_handle_q); + if (core_handle_q == NULL) { + pr_aud_err("%s: Unable to register CORE\n", __func__); + return NULL; + } + return core_handle_q; +} +EXPORT_SYMBOL(core_open); + +int32_t core_close(void) +{ + int ret = 0; + if (core_handle_q == NULL) { + pr_aud_err("CORE is already closed\n"); + ret = -EINVAL; + return ret; + } + apr_deregister(core_handle_q); + return ret; +} +EXPORT_SYMBOL(core_close); + +uint32_t core_get_adsp_version(void) +{ + struct apr_hdr *hdr; + int32_t rc = 0, ret = 0; + core_open(); + if (core_handle_q) { + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = ADSP_GET_VERSION; + + apr_send_pkt(core_handle_q, (uint32_t *)l_buf); + query_adsp_ver = 1; + pr_aud_info("Write_q\n"); + ret = wait_event_timeout(adsp_version_wait, + (query_adsp_ver == 0), + msecs_to_jiffies(TIMEOUT_MS)); + rc = adsp_version; + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + rc = -ENODEV; + } + } else + pr_aud_info("apr registration failed\n"); + return rc; +} +EXPORT_SYMBOL(core_get_adsp_version); + +static ssize_t apr_debug_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int len; + static int t_len; + + if (count < 0) + return 0; + len = count > 63 ? 
63 : count; + if (copy_from_user(l_buf + 20 , buf, len)) { + pr_aud_info("Unable to copy data from user space\n"); + return -EFAULT; + } + l_buf[len + 20] = 0; + if (l_buf[len + 20 - 1] == '\n') { + l_buf[len + 20 - 1] = 0; + len--; + } + if (!strncmp(l_buf + 20, "open_q", 64)) { + apr_handle_q = apr_register("ADSP", "TEST", aprv2_debug_fn_q, + 0xFFFFFFFF, NULL); + pr_aud_info("Open_q %p\n", apr_handle_q); + } else if (!strncmp(l_buf + 20, "open_m", 64)) { + apr_handle_m = apr_register("MODEM", "TEST", aprv2_debug_fn_m, + 0xFFFFFFFF, NULL); + pr_aud_info("Open_m %p\n", apr_handle_m); + } else if (!strncmp(l_buf + 20, "write_q", 64)) { + struct apr_hdr *hdr; + + t_len++; + t_len = t_len % 450; + if (!t_len % 99) + msleep(2000); + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, t_len); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 20, 9, 4060); + + apr_send_pkt(apr_handle_q, (uint32_t *)l_buf); + pr_debug("Write_q\n"); + } else if (!strncmp(l_buf + 20, "write_m", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 8); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_m, (uint32_t *)l_buf); + pr_aud_info("Write_m\n"); + } else if (!strncmp(l_buf + 20, "write_q4", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 4076); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_q, (uint32_t *)l_buf); + pr_aud_info("Write_q\n"); + } else if (!strncmp(l_buf + 20, "write_m4", 64)) { + struct apr_hdr *hdr; + + hdr = (struct apr_hdr *)l_buf; + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(20), APR_PKT_VER); + hdr->pkt_size = APR_PKT_SIZE(20, 4076); + hdr->src_port = 0; + hdr->dest_port = 0; + hdr->token = 0; + hdr->opcode = 0x12345678; + memset(l_buf + 30, 9, 4060); + + apr_send_pkt(apr_handle_m, (uint32_t *)l_buf); + pr_aud_info("Write_m\n"); + } else if (!strncmp(l_buf + 20, "close", 64)) { + if (apr_handle_q) + apr_deregister(apr_handle_q); + } else if (!strncmp(l_buf + 20, "loaded", 64)) { + change_q6_state(APR_Q6_LOADED); + } else if (!strncmp(l_buf + 20, "boom", 64)) { + q6audio_dsp_not_responding(); + } else if (!strncmp(l_buf + 20, "dsp_ver", 64)) { + core_get_adsp_version(); + } else if (!strncmp(l_buf + 20, "en_pwr_col", 64)) { + struct adsp_power_collapse pc; + + core_handle_q = core_open(); + if (core_handle_q) { + pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(uint32_t));; + pc.hdr.src_port = 0; + pc.hdr.dest_port = 0; + pc.hdr.token = 0; + pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE; + pc.power_collapse = 0x00000000; + apr_send_pkt(core_handle_q, (uint32_t *)&pc); + pr_aud_info("Write_q :enable power collapse\n"); + } + } else if (!strncmp(l_buf + 20, "dis_pwr_col", 64)) { + struct adsp_power_collapse pc; + + core_handle_q = core_open(); + if (core_handle_q) { + pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, + APR_HDR_LEN(APR_HDR_SIZE), 
APR_PKT_VER); + pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(uint32_t)); + pc.hdr.src_port = 0; + pc.hdr.dest_port = 0; + pc.hdr.token = 0; + pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE; + pc.power_collapse = 0x00000001; + apr_send_pkt(core_handle_q, (uint32_t *)&pc); + pr_aud_info("Write_q:disable power collapse\n"); + } + } else + pr_aud_info("Unknown Command\n"); + + return count; +} + +static const struct file_operations apr_debug_fops = { + .write = apr_debug_write, + .open = apr_debug_open, +}; + +static int __init core_init(void) +{ +#ifdef CONFIG_DEBUG_FS + dentry = debugfs_create_file("apr", 0644, + NULL, (void *) NULL, &apr_debug_fops); +#endif /* CONFIG_DEBUG_FS */ + query_adsp_ver = 0; + init_waitqueue_head(&adsp_version_wait); + adsp_version = 0; + core_handle_q = NULL; + return 0; +} +device_initcall(core_init); + diff --git a/arch/arm/mach-msm/qdsp6v3/q6voice.c b/arch/arm/mach-msm/qdsp6v3/q6voice.c new file mode 100644 index 00000000000..e320184d0d9 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/q6voice.c @@ -0,0 +1,2812 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" +#include "rtac.h" +#include + +#define TIMEOUT_MS 3000 +#define SNDDEV_CAP_TTY 0x20 + +#define CMD_STATUS_SUCCESS 0 +#define CMD_STATUS_FAIL 1 + +#define VOC_PATH_PASSIVE 0 +#define VOC_PATH_FULL 1 +#define ADSP_VERSION_CVD 0x60300000 + +#define BUFFER_PAYLOAD_SIZE 4000 + +#define VOC_REC_NONE 0xFF + +#define AUD_LOG(x...) 
do { \ +struct timespec ts; \ +struct rtc_time tm; \ +getnstimeofday(&ts); \ +rtc_time_to_tm(ts.tv_sec, &tm); \ +printk(KERN_INFO "[AUD] " x); \ +printk("[AUD] at %lld (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", \ +ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \ +tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \ +} while (0) + + +struct voice_data voice; + +static bool is_adsp_support_cvd(void) +{ + return (voice.adsp_version >= ADSP_VERSION_CVD); +} +static int voice_send_enable_vocproc_cmd(struct voice_data *v); +static int voice_send_netid_timing_cmd(struct voice_data *v); + +static void *voice_get_apr_mvm(struct voice_data *v) +{ + void *apr_mvm = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_mvm = v->apr_mvm; + else + apr_mvm = v->apr_q6_mvm; + + pr_debug("%s: apr_mvm 0x%x\n", __func__, (unsigned int)apr_mvm); + + return apr_mvm; +} + +static void voice_set_apr_mvm(struct voice_data *v, void *apr_mvm) +{ + pr_debug("%s: apr_mvm 0x%x\n", __func__, (unsigned int)apr_mvm); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_mvm = apr_mvm; + else + v->apr_q6_mvm = apr_mvm; +} + +static void *voice_get_apr_cvs(struct voice_data *v) +{ + void *apr_cvs = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_cvs = v->apr_cvs; + else + apr_cvs = v->apr_q6_cvs; + + pr_debug("%s: apr_cvs 0x%x\n", __func__, (unsigned int)apr_cvs); + + return apr_cvs; +} + +static void voice_set_apr_cvs(struct voice_data *v, void *apr_cvs) +{ + pr_debug("%s: apr_cvs 0x%x\n", __func__, (unsigned int)apr_cvs); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_cvs = apr_cvs; + else + v->apr_q6_cvs = apr_cvs; +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_voice_handle(RTAC_CVS, apr_cvs); +#endif +} + +static void *voice_get_apr_cvp(struct voice_data *v) +{ + void *apr_cvp = NULL; + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + apr_cvp = v->apr_cvp; + else + apr_cvp = v->apr_q6_cvp; + + pr_debug("%s: apr_cvp 0x%x\n", __func__, (unsigned int)apr_cvp); + + return apr_cvp; +} + +static void voice_set_apr_cvp(struct voice_data *v, void *apr_cvp) +{ + pr_debug("%s: apr_cvp 0x%x\n", __func__, (unsigned int)apr_cvp); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) + v->apr_cvp = apr_cvp; + else + v->apr_q6_cvp = apr_cvp; +#ifdef CONFIG_MSM8X60_RTAC + rtac_set_voice_handle(RTAC_CVP, apr_cvp); +#endif +} + +static u16 voice_get_mvm_handle(struct voice_data *v) +{ + u16 mvm_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + mvm_handle = v->mvm_handle; + else + mvm_handle = v->mvm_q6_handle; + + pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle); + + return mvm_handle; +} + +static void voice_set_mvm_handle(struct voice_data *v, u16 mvm_handle) +{ + pr_debug("%s: mvm_handle %d\n", __func__, mvm_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->mvm_handle = mvm_handle; + else + v->mvm_q6_handle = mvm_handle; +} + +static u16 voice_get_cvs_handle(struct voice_data *v) +{ + u16 cvs_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + cvs_handle = v->cvs_handle; + else + cvs_handle = v->cvs_q6_handle; + + pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle); + + return cvs_handle; +} + +static void voice_set_cvs_handle(struct voice_data *v, u16 cvs_handle) +{ + pr_debug("%s: cvs_handle %d\n", __func__, cvs_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->cvs_handle = cvs_handle; + else + v->cvs_q6_handle = cvs_handle; +} + 
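
Annotation (not part of the patch): the accessor helpers above, and voice_get_cvp_handle() just below, all apply one dispatch rule — a passive voice session on a pre-CVD ADSP image keeps talking to the modem-hosted MVM/CVS/CVP services, while a full-control (VoIP) session or a CVD-capable ADSP uses the Q6-hosted instances. The sketch below is a minimal, self-contained illustration of that selection rule only; struct voice_sketch and the *_X constants are simplified stand-ins mirroring VOC_PATH_PASSIVE, VOC_PATH_FULL and ADSP_VERSION_CVD from this file, not the driver's real definitions.

/* Illustrative sketch only -- builds as plain user-space C. */
#include <stdbool.h>
#include <stdio.h>

#define VOC_PATH_PASSIVE_X   0           /* mirrors VOC_PATH_PASSIVE */
#define VOC_PATH_FULL_X      1           /* mirrors VOC_PATH_FULL */
#define ADSP_VERSION_CVD_X   0x60300000  /* mirrors ADSP_VERSION_CVD */

struct voice_sketch {
	int voc_path;
	unsigned int adsp_version;
	void *apr_cvp;     /* modem-hosted CVP handle */
	void *apr_q6_cvp;  /* ADSP (Q6)-hosted CVP handle */
};

/* Same rule as voice_get_apr_cvp(): only the passive path on a
 * pre-CVD ADSP keeps using the modem-side service handle. */
static void *sketch_get_apr_cvp(struct voice_sketch *v)
{
	bool adsp_has_cvd = v->adsp_version >= ADSP_VERSION_CVD_X;

	if (v->voc_path == VOC_PATH_PASSIVE_X && !adsp_has_cvd)
		return v->apr_cvp;
	return v->apr_q6_cvp;
}

int main(void)
{
	struct voice_sketch v = {
		.voc_path     = VOC_PATH_PASSIVE_X,
		.adsp_version = 0x60200000,        /* older than CVD */
		.apr_cvp      = (void *)0x1,
		.apr_q6_cvp   = (void *)0x2,
	};

	printf("passive, pre-CVD -> %p (modem-hosted)\n", sketch_get_apr_cvp(&v));

	v.adsp_version = ADSP_VERSION_CVD_X;       /* CVD-capable ADSP */
	printf("passive, CVD     -> %p (Q6-hosted)\n", sketch_get_apr_cvp(&v));

	v.voc_path = VOC_PATH_FULL_X;              /* VoIP / full control */
	printf("full control     -> %p (Q6-hosted)\n", sketch_get_apr_cvp(&v));
	return 0;
}

The same passive-path/CVD check also decides which destination ("MODEM" vs "ADSP") voice_apr_register() passes to apr_register() later in this file, so the handle returned here always matches the service that was actually registered.
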
+static u16 voice_get_cvp_handle(struct voice_data *v) +{ + u16 cvp_handle = 0; + + if (v->voc_path == VOC_PATH_PASSIVE) + cvp_handle = v->cvp_handle; + else + cvp_handle = v->cvp_q6_handle; + + pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle); + + return cvp_handle; +} + +static void voice_set_cvp_handle(struct voice_data *v, u16 cvp_handle) +{ + pr_debug("%s: cvp_handle %d\n", __func__, cvp_handle); + + if (v->voc_path == VOC_PATH_PASSIVE) + v->cvp_handle = cvp_handle; + else + v->cvp_q6_handle = cvp_handle; +} + +static void voice_auddev_cb_function(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data); + +static int32_t modem_mvm_callback(struct apr_client_data *data, void *priv); +static int32_t modem_cvs_callback(struct apr_client_data *data, void *priv); +static int32_t modem_cvp_callback(struct apr_client_data *data, void *priv); + +static int voice_apr_register(struct voice_data *v) +{ + int rc = 0; + void *apr_mvm; + void *apr_cvs; + void *apr_cvp; + + if (v->adsp_version == 0) { + core_open(); + v->adsp_version = core_get_adsp_version(); + pr_aud_info("adsp_ver fetched:%x\n", v->adsp_version); + } + apr_mvm = voice_get_apr_mvm(v); + apr_cvs = voice_get_apr_cvs(v); + apr_cvp = voice_get_apr_cvp(v); + + + pr_debug("into voice_apr_register_callback\n"); + /* register callback to APR */ + if (apr_mvm == NULL) { + pr_debug("start to register MVM callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_mvm = apr_register("MODEM", "MVM", + modem_mvm_callback, 0xFFFFFFFF, + v); + } else { + apr_mvm = apr_register("ADSP", "MVM", + modem_mvm_callback, 0xFFFFFFFF, + v); + } + + if (apr_mvm == NULL) { + pr_aud_err("Unable to register MVM %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto done; + } + + voice_set_apr_mvm(v, apr_mvm); + } + + if (apr_cvs == NULL) { + pr_debug("start to register CVS callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_cvs = apr_register("MODEM", "CVS", + modem_cvs_callback, 0xFFFFFFFF, + v); + } else { + apr_cvs = apr_register("ADSP", "CVS", + modem_cvs_callback, 0xFFFFFFFF, + v); + } + + if (apr_cvs == NULL) { + pr_aud_err("Unable to register CVS %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto err; + } + + voice_set_apr_cvs(v, apr_cvs); + } + + if (apr_cvp == NULL) { + pr_debug("start to register CVP callback\n"); + + if (v->voc_path == VOC_PATH_PASSIVE && + !(is_adsp_support_cvd())) { + apr_cvp = apr_register("MODEM", "CVP", + modem_cvp_callback, 0xFFFFFFFF, + v); + } else { + apr_cvp = apr_register("ADSP", "CVP", + modem_cvp_callback, 0xFFFFFFFF, + v); + } + + if (apr_cvp == NULL) { + pr_aud_err("Unable to register CVP %d\n", + is_adsp_support_cvd()); + rc = -ENODEV; + goto err1; + } + + voice_set_apr_cvp(v, apr_cvp); + } + return 0; + +err1: + apr_deregister(apr_cvs); + apr_cvs = NULL; + voice_set_apr_cvs(v, apr_cvs); +err: + apr_deregister(apr_mvm); + apr_mvm = NULL; + voice_set_apr_mvm(v, apr_mvm); + +done: + return rc; +} + +static int voice_create_mvm_cvs_session(struct voice_data *v) +{ + int ret = 0; + struct mvm_create_ctl_session_cmd mvm_session_cmd; + struct cvs_create_passive_ctl_session_cmd cvs_session_cmd; + struct cvs_create_full_ctl_session_cmd cvs_full_ctl_cmd; + struct mvm_attach_stream_cmd attach_stream_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvs = voice_get_apr_cvs(v); + void *apr_cvp = voice_get_apr_cvp(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvs_handle = voice_get_cvs_handle(v); + u16 cvp_handle = 
voice_get_cvp_handle(v); + + pr_aud_info("%s:\n", __func__); + + /* start to ping if modem service is up */ + pr_debug("in voice_create_mvm_cvs_session, mvm_hdl=%d, cvs_hdl=%d\n", + mvm_handle, cvs_handle); + /* send cmd to create mvm session and wait for response */ + + if (!mvm_handle) { + if (v->voc_path == VOC_PATH_PASSIVE) { + mvm_session_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_session_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_session_cmd) - APR_HDR_SIZE); + pr_debug("Send mvm create session pkt size = %d\n", + mvm_session_cmd.hdr.pkt_size); + mvm_session_cmd.hdr.src_port = 0; + mvm_session_cmd.hdr.dest_port = 0; + mvm_session_cmd.hdr.token = 0; + pr_debug("%s: Creating MVM passive ctrl\n", __func__); + mvm_session_cmd.hdr.opcode = + VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION; + strncpy(mvm_session_cmd.mvm_session.name, + "default modem voice", SESSION_NAME_LEN); + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &mvm_session_cmd); + if (ret < 0) { + pr_aud_err("Error sending MVM_CONTROL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } else { + mvm_session_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_session_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_session_cmd) - APR_HDR_SIZE); + pr_debug("Send mvm create session pkt size = %d\n", + mvm_session_cmd.hdr.pkt_size); + mvm_session_cmd.hdr.src_port = 0; + mvm_session_cmd.hdr.dest_port = 0; + mvm_session_cmd.hdr.token = 0; + pr_debug("%s: Creating MVM full ctrl\n", __func__); + mvm_session_cmd.hdr.opcode = + VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION; + strcpy(mvm_session_cmd.mvm_session.name, + "default voip"); + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &mvm_session_cmd); + if (ret < 0) { + pr_aud_err("Error sending MVM_FULL_CTL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } + + /* Get the created MVM handle. 
*/ + mvm_handle = voice_get_mvm_handle(v); + } + + /* send cmd to create cvs session */ + if (!cvs_handle) { + if (v->voc_path == VOC_PATH_PASSIVE) { + pr_aud_info("%s:creating CVS passive session\n", __func__); + + cvs_session_cmd.hdr.hdr_field = APR_HDR_FIELD( + APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_session_cmd) - APR_HDR_SIZE); + pr_aud_info("send stream create session pkt size = %d\n", + cvs_session_cmd.hdr.pkt_size); + cvs_session_cmd.hdr.src_port = 0; + cvs_session_cmd.hdr.dest_port = 0; + cvs_session_cmd.hdr.token = 0; + cvs_session_cmd.hdr.opcode = + VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION; + strncpy(cvs_session_cmd.cvs_session.name, + "default modem voice", SESSION_NAME_LEN); + v->cvs_state = CMD_STATUS_FAIL; + + pr_aud_info("%s: CVS create\n", __func__); + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending STREAM_CONTROL_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Get the created CVS handle. */ + cvs_handle = voice_get_cvs_handle(v); + } else { + pr_aud_info("%s:creating CVS full session\n", __func__); + + cvs_full_ctl_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + + cvs_full_ctl_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_full_ctl_cmd) - APR_HDR_SIZE); + + cvs_full_ctl_cmd.hdr.src_port = 0; + cvs_full_ctl_cmd.hdr.dest_port = 0; + cvs_full_ctl_cmd.hdr.token = 0; + cvs_full_ctl_cmd.hdr.opcode = + VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION; + cvs_full_ctl_cmd.cvs_session.direction = 2; + + cvs_full_ctl_cmd.cvs_session.enc_media_type = + v->mvs_info.media_type; + cvs_full_ctl_cmd.cvs_session.dec_media_type = + v->mvs_info.media_type; + cvs_full_ctl_cmd.cvs_session.network_id = + v->mvs_info.network_type; + strncpy(cvs_full_ctl_cmd.cvs_session.name, + "default voip", SESSION_NAME_LEN); + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, + (uint32_t *) &cvs_full_ctl_cmd); + + if (ret < 0) { + pr_aud_err("%s: Err %d sending CREATE_FULL_CTRL\n", + __func__, ret); + goto fail; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Get the created CVS handle. */ + cvs_handle = voice_get_cvs_handle(v); + + /* Attach MVM to CVS. 
*/ + pr_aud_info("%s: Attach MVM to stream\n", __func__); + + attach_stream_cmd.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + + attach_stream_cmd.hdr.pkt_size = + APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(attach_stream_cmd) - APR_HDR_SIZE); + attach_stream_cmd.hdr.src_port = 0; + attach_stream_cmd.hdr.dest_port = mvm_handle; + attach_stream_cmd.hdr.token = 0; + attach_stream_cmd.hdr.opcode = + VSS_IMVM_CMD_ATTACH_STREAM; + attach_stream_cmd.attach_stream.handle = cvs_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, + (uint32_t *) &attach_stream_cmd); + if (ret < 0) { + pr_aud_err("%s: Error %d sending ATTACH_STREAM\n", + __func__, ret); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + } + } + + return 0; + +fail: + apr_deregister(apr_mvm); + apr_mvm = NULL; + voice_set_apr_mvm(v, apr_mvm); + + apr_deregister(apr_cvs); + apr_cvs = NULL; + voice_set_apr_cvs(v, apr_cvs); + + apr_deregister(apr_cvp); + apr_cvp = NULL; + voice_set_apr_cvp(v, apr_cvp); + + cvp_handle = 0; + voice_set_cvp_handle(v, cvp_handle); + + cvs_handle = 0; + voice_set_cvs_handle(v, cvs_handle); + + return -EINVAL; +} + +static int voice_destroy_mvm_cvs_session(struct voice_data *v) +{ + int ret = 0; + struct mvm_detach_stream_cmd detach_stream; + struct apr_hdr mvm_destroy; + struct apr_hdr cvs_destroy; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvs = voice_get_apr_cvs(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* MVM, CVS sessions are destroyed only for Full control sessions. */ + if (v->voc_path == VOC_PATH_FULL) { + pr_aud_info("%s: MVM detach stream\n", __func__); + + /* Detach voice stream. */ + detach_stream.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + detach_stream.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(detach_stream) - APR_HDR_SIZE); + detach_stream.hdr.src_port = 0; + detach_stream.hdr.dest_port = mvm_handle; + detach_stream.hdr.token = 0; + detach_stream.hdr.opcode = VSS_IMVM_CMD_DETACH_STREAM; + detach_stream.detach_stream.handle = cvs_handle; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &detach_stream); + if (ret < 0) { + pr_aud_err("%s: Error %d sending DETACH_STREAM\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + goto fail; + } + + /* Destroy CVS. 
*/ + pr_aud_info("%s: CVS destroy session\n", __func__); + + cvs_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_destroy) - APR_HDR_SIZE); + cvs_destroy.src_port = 0; + cvs_destroy.dest_port = cvs_handle; + cvs_destroy.token = 0; + cvs_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_destroy); + if (ret < 0) { + pr_aud_err("%s: Error %d sending CVS DESTROY\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + + goto fail; + } + cvs_handle = 0; + voice_set_cvs_handle(v, cvs_handle); + + /* Destroy MVM. */ + pr_aud_info("%s: MVM destroy session\n", __func__); + + mvm_destroy.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mvm_destroy.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_destroy) - APR_HDR_SIZE); + mvm_destroy.src_port = 0; + mvm_destroy.dest_port = mvm_handle; + mvm_destroy.token = 0; + mvm_destroy.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_destroy); + if (ret < 0) { + pr_aud_err("%s: Error %d sending MVM DESTROY\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait event timeout\n", __func__); + + goto fail; + } + mvm_handle = 0; + voice_set_mvm_handle(v, mvm_handle); + } + +fail: + return 0; +} + +static int voice_send_tty_mode_to_modem(struct voice_data *v) +{ + struct msm_snddev_info *dev_tx_info; + struct msm_snddev_info *dev_rx_info; + int tty_mode = 0; + int ret = 0; + struct mvm_set_tty_mode_cmd mvm_tty_mode_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + dev_rx_info = audio_dev_ctrl_find_dev(v->dev_rx.dev_id); + if (IS_ERR(dev_rx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_rx.dev_id); + goto done; + } + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto done; + } + + if ((dev_rx_info->capability & SNDDEV_CAP_TTY) && + (dev_tx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 3; /* FULL */ + else if (!(dev_tx_info->capability & SNDDEV_CAP_TTY) && + (dev_rx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 2; /* VCO */ + else if ((dev_tx_info->capability & SNDDEV_CAP_TTY) && + !(dev_rx_info->capability & SNDDEV_CAP_TTY)) + tty_mode = 1; /* HCO */ + + if (tty_mode) { + /* send tty mode cmd to mvm */ + mvm_tty_mode_cmd.hdr.hdr_field = APR_HDR_FIELD( + APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + mvm_tty_mode_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_tty_mode_cmd) - APR_HDR_SIZE); + pr_debug("pkt size = %d\n", mvm_tty_mode_cmd.hdr.pkt_size); + mvm_tty_mode_cmd.hdr.src_port = 0; + mvm_tty_mode_cmd.hdr.dest_port = mvm_handle; + mvm_tty_mode_cmd.hdr.token = 0; + mvm_tty_mode_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_TTY_MODE; + mvm_tty_mode_cmd.tty_mode.mode = tty_mode; + pr_aud_info("tty mode =%d\n", mvm_tty_mode_cmd.tty_mode.mode); + + v->mvm_state = CMD_STATUS_FAIL; + pr_aud_info("%s: MVM set tty\n", __func__); + ret = apr_send_pkt(apr_mvm, (uint32_t *) 
&mvm_tty_mode_cmd); + if (ret < 0) { + pr_aud_err("Fail: sending VSS_ISTREAM_CMD_SET_TTY_MODE\n"); + goto done; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto done; + } + } + return 0; +done: + return -EINVAL; +} + +static int voice_send_cvs_cal_to_modem(struct voice_data *v) +{ + struct apr_hdr cvs_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* fill the header */ + cvs_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_cal_cmd_hdr) - APR_HDR_SIZE); + cvs_cal_cmd_hdr.src_port = 0; + cvs_cal_cmd_hdr.dest_port = cvs_handle; + cvs_cal_cmd_hdr.token = 0; + cvs_cal_cmd_hdr.opcode = + VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA; + + pr_debug("voice_send_cvs_cal_to_modem\n"); + /* get the cvs cal data */ + get_vocstrm_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cvs cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug("cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + pr_debug(" cal size =%d\n", cal_size_per_network); + if (cal_size_per_network >= BUFFER_PAYLOAD_SIZE) + pr_aud_err("Cal size is too big\n"); + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug(" cal data=%x\n", (uint32_t)cal_data_per_network); + cvs_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + pr_debug("header size =%d, pkt_size =%d\n", + APR_HDR_SIZE, cvs_cal_cmd_hdr.pkt_size); + memcpy(cmd_buf, &cvs_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / sizeof(uint32_t)), + cal_data_per_network, cal_size_per_network); + pr_debug("send cvs cal: index =%d\n", index); + v->cvs_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvs, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvs cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_send_cvp_cal_to_modem(struct voice_data *v) +{ + struct apr_hdr cvp_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* fill the header */ + cvp_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_cal_cmd_hdr) - APR_HDR_SIZE); + cvp_cal_cmd_hdr.src_port = 0; + cvp_cal_cmd_hdr.dest_port = 
cvp_handle; + cvp_cal_cmd_hdr.token = 0; + cvp_cal_cmd_hdr.opcode = + VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA; + + /* get cal data */ + get_vocproc_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug(" cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + if (cal_size_per_network >= BUFFER_PAYLOAD_SIZE) + pr_aud_err("Cal size is too big\n"); + pr_debug(" cal size =%d\n", cal_size_per_network); + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug(" cal data=%x\n", (uint32_t)cal_data_per_network); + + cvp_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + memcpy(cmd_buf, &cvp_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / sizeof(*cmd_buf)), + cal_data_per_network, cal_size_per_network); + pr_debug("Send cvp cal\n"); + v->cvp_state = CMD_STATUS_FAIL; + pr_aud_info("%s: CVP calib\n", __func__); + ret = apr_send_pkt(apr_cvp, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvp cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_send_cvp_vol_tbl_to_modem(struct voice_data *v) +{ + struct apr_hdr cvp_vol_cal_cmd_hdr = {0}; + uint32_t *cmd_buf; + struct acdb_cal_data cal_data; + struct acdb_cal_block *cal_blk; + int32_t cal_size_per_network; + uint32_t *cal_data_per_network; + int index = 0; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* fill the header */ + cvp_vol_cal_cmd_hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_vol_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_vol_cal_cmd_hdr) - APR_HDR_SIZE); + cvp_vol_cal_cmd_hdr.src_port = 0; + cvp_vol_cal_cmd_hdr.dest_port = cvp_handle; + cvp_vol_cal_cmd_hdr.token = 0; + cvp_vol_cal_cmd_hdr.opcode = + VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE; + + /* get cal data */ + get_vocvol_cal(&cal_data); + if (cal_data.num_cal_blocks == 0) { + pr_aud_err("%s: No calibration data to send!\n", __func__); + goto done; + } + + /* send cal to modem */ + cmd_buf = kzalloc((sizeof(struct apr_hdr) + BUFFER_PAYLOAD_SIZE), + GFP_KERNEL); + if (!cmd_buf) { + pr_aud_err("No memory is allocated.\n"); + return -ENOMEM; + } + pr_debug("----- num_cal_blocks=%d\n", (s32)cal_data.num_cal_blocks); + cal_blk = cal_data.cal_blocks; + pr_debug("Cal_blk =%x\n", (uint32_t)cal_data.cal_blocks); + + for (; index < cal_data.num_cal_blocks; index++) { + cal_size_per_network = cal_blk[index].cal_size; + cal_data_per_network = (u32 *)cal_blk[index].cal_kvaddr; + pr_debug("Cal size =%d, index=%d\n", cal_size_per_network, + index); + pr_debug("Cal data=%x\n", (uint32_t)cal_data_per_network); + cvp_vol_cal_cmd_hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + cal_size_per_network); + memcpy(cmd_buf, &cvp_vol_cal_cmd_hdr, APR_HDR_SIZE); + memcpy(cmd_buf + (APR_HDR_SIZE / 
sizeof(uint32_t)), + cal_data_per_network, cal_size_per_network); + pr_debug("Send vol table\n"); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, cmd_buf); + if (ret < 0) { + pr_aud_err("Fail: sending cvp vol cal, idx=%d\n", index); + continue; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + } + kfree(cmd_buf); +done: + return 0; +} + +static int voice_set_dtx(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* Set DTX */ + struct cvs_set_enc_dtx_mode_cmd cvs_set_dtx = { + .hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER), + .hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_dtx) - APR_HDR_SIZE), + .hdr.src_port = 0, + .hdr.dest_port = cvs_handle, + .hdr.token = 0, + .hdr.opcode = VSS_ISTREAM_CMD_SET_ENC_DTX_MODE, + .dtx_mode.enable = v->mvs_info.dtx_mode, + }; + + pr_debug("%s: Setting DTX %d\n", __func__, v->mvs_info.dtx_mode); + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_dtx); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_DTX\n", __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + } + +done: + return ret; +} + +static int voice_config_cvs_vocoder(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* Set media type. */ + struct cvs_set_media_type_cmd cvs_set_media_cmd; + + pr_aud_info("%s: Setting media type\n", __func__); + + cvs_set_media_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_media_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_media_cmd) - APR_HDR_SIZE); + cvs_set_media_cmd.hdr.src_port = 0; + cvs_set_media_cmd.hdr.dest_port = cvs_handle; + cvs_set_media_cmd.hdr.token = 0; + cvs_set_media_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MEDIA_TYPE; + cvs_set_media_cmd.media_type.tx_media_id = v->mvs_info.media_type; + cvs_set_media_cmd.media_type.rx_media_id = v->mvs_info.media_type; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_media_cmd); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_MEDIA_TYPE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + /* Set encoder properties. 
*/ + switch (v->mvs_info.media_type) { + case VSS_MEDIA_ID_EVRC_MODEM: { + struct cvs_set_cdma_enc_minmax_rate_cmd cvs_set_cdma_rate; + + pr_aud_info("%s: Setting EVRC min-max rate\n", __func__); + + cvs_set_cdma_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_cdma_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_cdma_rate) - APR_HDR_SIZE); + cvs_set_cdma_rate.hdr.src_port = 0; + cvs_set_cdma_rate.hdr.dest_port = cvs_handle; + cvs_set_cdma_rate.hdr.token = 0; + cvs_set_cdma_rate.hdr.opcode = + VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE; + cvs_set_cdma_rate.cdma_rate.min_rate = v->mvs_info.rate; + cvs_set_cdma_rate.cdma_rate.max_rate = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_cdma_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_EVRC_MINMAX_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + break; + } + + case VSS_MEDIA_ID_AMR_NB_MODEM: { + struct cvs_set_amr_enc_rate_cmd cvs_set_amr_rate; + + pr_aud_info("%s: Setting AMR rate\n", __func__); + + cvs_set_amr_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_amr_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_amr_rate) - APR_HDR_SIZE); + cvs_set_amr_rate.hdr.src_port = 0; + cvs_set_amr_rate.hdr.dest_port = cvs_handle; + cvs_set_amr_rate.hdr.token = 0; + cvs_set_amr_rate.hdr.opcode = + VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE; + cvs_set_amr_rate.amr_rate.mode = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amr_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_AMR_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + ret = voice_set_dtx(v); + + break; + } + + case VSS_MEDIA_ID_AMR_WB_MODEM: { + struct cvs_set_amrwb_enc_rate_cmd cvs_set_amrwb_rate; + + pr_aud_info("%s: Setting AMR WB rate\n", __func__); + + cvs_set_amrwb_rate.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + cvs_set_amrwb_rate.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_set_amrwb_rate) - APR_HDR_SIZE); + cvs_set_amrwb_rate.hdr.src_port = 0; + cvs_set_amrwb_rate.hdr.dest_port = cvs_handle; + cvs_set_amrwb_rate.hdr.token = 0; + cvs_set_amrwb_rate.hdr.opcode = + VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE; + cvs_set_amrwb_rate.amrwb_rate.mode = v->mvs_info.rate; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_amrwb_rate); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_AMRWB_RATE\n", + __func__, ret); + + goto done; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + ret = -EINVAL; + goto done; + } + + ret = voice_set_dtx(v); + + break; + } + + case VSS_MEDIA_ID_G729: + case VSS_MEDIA_ID_G711_ALAW: + case VSS_MEDIA_ID_G711_MULAW: { + ret = voice_set_dtx(v); + + break; + } + + default: { + /* Do nothing. 
*/ + } + } + +done: + return ret; +} + +static int voice_send_start_voice_cmd(struct voice_data *v) +{ + struct apr_hdr mvm_start_voice_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + mvm_start_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_start_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_start_voice_cmd) - APR_HDR_SIZE); + AUD_LOG("send mvm_start_voice_cmd pkt size = %d\n", + mvm_start_voice_cmd.pkt_size); + mvm_start_voice_cmd.src_port = 0; + mvm_start_voice_cmd.dest_port = mvm_handle; + mvm_start_voice_cmd.token = 0; + mvm_start_voice_cmd.opcode = VSS_IMVM_CMD_START_VOICE; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_start_voice_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IMVM_CMD_START_VOICE\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_disable_vocproc(struct voice_data *v) +{ + struct apr_hdr cvp_disable_cmd; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* disable vocproc and wait for respose */ + cvp_disable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_disable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_disable_cmd) - APR_HDR_SIZE); + pr_debug("cvp_disable_cmd pkt size = %d, cvp_handle=%d\n", + cvp_disable_cmd.pkt_size, cvp_handle); + cvp_disable_cmd.src_port = 0; + cvp_disable_cmd.dest_port = cvp_handle; + cvp_disable_cmd.token = 0; + cvp_disable_cmd.opcode = VSS_IVOCPROC_CMD_DISABLE; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_disable_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IVOCPROC_CMD_DISABLE\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_voice(v); +#endif + + return 0; +fail: + return -EINVAL; +} + +static int voice_set_device(struct voice_data *v) +{ + struct cvp_set_device_cmd cvp_setdev_cmd; + struct msm_snddev_info *dev_tx_info; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + + /* set device and wait for response */ + cvp_setdev_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_setdev_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_setdev_cmd) - APR_HDR_SIZE); + pr_debug(" send create cvp setdev, pkt size = %d\n", + cvp_setdev_cmd.hdr.pkt_size); + cvp_setdev_cmd.hdr.src_port = 0; + cvp_setdev_cmd.hdr.dest_port = cvp_handle; + cvp_setdev_cmd.hdr.token = 0; + cvp_setdev_cmd.hdr.opcode = VSS_IVOCPROC_CMD_SET_DEVICE; + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto fail; + } + + if (dev_tx_info->channel_mode > 1) + cvp_setdev_cmd.cvp_set_device.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE; + else + cvp_setdev_cmd.cvp_set_device.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS; + cvp_setdev_cmd.cvp_set_device.rx_topology_id = + 
VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT; + cvp_setdev_cmd.cvp_set_device.tx_port_id = v->dev_tx.dev_port_id; + cvp_setdev_cmd.cvp_set_device.rx_port_id = v->dev_rx.dev_port_id; + pr_aud_info("topology=%d , tx_port_id=%d, rx_port_id=%d\n", + cvp_setdev_cmd.cvp_set_device.tx_topology_id, + cvp_setdev_cmd.cvp_set_device.tx_port_id, + cvp_setdev_cmd.cvp_set_device.rx_port_id); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_setdev_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n"); + goto fail; + } + pr_debug("wait for cvp create session event\n"); + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send cvs cal */ + voice_send_cvs_cal_to_modem(v); + + /* send cvp cal */ + voice_send_cvp_cal_to_modem(v); + + /* send cvp vol table cal */ + voice_send_cvp_vol_tbl_to_modem(v); + + /* enable vocproc and wait for respose */ + voice_send_enable_vocproc_cmd(v); + + /* send tty mode if tty device is used */ + voice_send_tty_mode_to_modem(v); + + if (v->voc_path == VOC_PATH_FULL) + voice_send_netid_timing_cmd(v); + +#ifdef CONFIG_MSM8X60_RTAC + rtac_add_voice(v); +#endif + + return 0; +fail: + return -EINVAL; +} + +static int voice_send_stop_voice_cmd(struct voice_data *v) +{ + struct apr_hdr mvm_stop_voice_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + + mvm_stop_voice_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_stop_voice_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_stop_voice_cmd) - APR_HDR_SIZE); + AUD_LOG("send mvm_stop_voice_cmd pkt size = %d\n", + mvm_stop_voice_cmd.pkt_size); + mvm_stop_voice_cmd.src_port = 0; + mvm_stop_voice_cmd.dest_port = mvm_handle; + mvm_stop_voice_cmd.token = 0; + mvm_stop_voice_cmd.opcode = VSS_IMVM_CMD_STOP_VOICE; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_stop_voice_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IMVM_CMD_STOP_VOICE\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_setup_modem_voice(struct voice_data *v) +{ + struct cvp_create_full_ctl_session_cmd cvp_session_cmd; + int ret = 0; + struct msm_snddev_info *dev_tx_info; + void *apr_cvp = voice_get_apr_cvp(v); + + /* create cvp session and wait for response */ + cvp_session_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_session_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_session_cmd) - APR_HDR_SIZE); + pr_aud_info(" send create cvp session, pkt size = %d\n", + cvp_session_cmd.hdr.pkt_size); + cvp_session_cmd.hdr.src_port = 0; + cvp_session_cmd.hdr.dest_port = 0; + cvp_session_cmd.hdr.token = 0; + cvp_session_cmd.hdr.opcode = + VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION; + + dev_tx_info = audio_dev_ctrl_find_dev(v->dev_tx.dev_id); + if (IS_ERR(dev_tx_info)) { + pr_aud_err("bad dev_id %d\n", v->dev_tx.dev_id); + goto fail; + } + + if (dev_tx_info->channel_mode > 1) + cvp_session_cmd.cvp_session.tx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE; + else + cvp_session_cmd.cvp_session.tx_topology_id = + 
VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS; + cvp_session_cmd.cvp_session.rx_topology_id = + VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT; + cvp_session_cmd.cvp_session.direction = 2; /*tx and rx*/ + cvp_session_cmd.cvp_session.network_id = VSS_NETWORK_ID_DEFAULT; + cvp_session_cmd.cvp_session.tx_port_id = v->dev_tx.dev_port_id; + cvp_session_cmd.cvp_session.rx_port_id = v->dev_rx.dev_port_id; + pr_aud_info("topology=%d net_id=%d, dir=%d tx_port_id=%d, rx_port_id=%d\n", + cvp_session_cmd.cvp_session.tx_topology_id, + cvp_session_cmd.cvp_session.network_id, + cvp_session_cmd.cvp_session.direction, + cvp_session_cmd.cvp_session.tx_port_id, + cvp_session_cmd.cvp_session.rx_port_id); + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VOCPROC_FULL_CONTROL_SESSION\n"); + goto fail; + } + pr_debug("wait for cvp create session event\n"); + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send cvs cal */ + voice_send_cvs_cal_to_modem(v); + + /* send cvp cal */ + voice_send_cvp_cal_to_modem(v); + + /* send cvp vol table cal */ + voice_send_cvp_vol_tbl_to_modem(v); + + return 0; + +fail: + return -EINVAL; +} + +static int voice_send_enable_vocproc_cmd(struct voice_data *v) +{ + int ret = 0; + struct apr_hdr cvp_enable_cmd; + + u16 cvp_handle = voice_get_cvp_handle(v); + void *apr_cvp = voice_get_apr_cvp(v); + + /* enable vocproc and wait for respose */ + cvp_enable_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_enable_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_enable_cmd) - APR_HDR_SIZE); + pr_debug("cvp_enable_cmd pkt size = %d, cvp_handle=%d\n", + cvp_enable_cmd.pkt_size, cvp_handle); + cvp_enable_cmd.src_port = 0; + cvp_enable_cmd.dest_port = cvp_handle; + cvp_enable_cmd.token = 0; + cvp_enable_cmd.opcode = VSS_IVOCPROC_CMD_ENABLE; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_enable_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_IVOCPROC_CMD_ENABLE\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_send_netid_timing_cmd(struct voice_data *v) +{ + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + struct mvm_set_network_cmd mvm_set_network; + struct mvm_set_voice_timing_cmd mvm_set_voice_timing; + u16 mvm_handle = voice_get_mvm_handle(v); + + ret = voice_config_cvs_vocoder(v); + if (ret < 0) { + pr_aud_err("%s: Error %d configuring CVS voc", + __func__, ret); + goto fail; + } + /* Set network ID. 
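The network type was cached in mvs_info by voice_config_vocoder() and is forwarded to the MVM session here.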
*/ + pr_debug("%s: Setting network ID\n", __func__); + + mvm_set_network.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_set_network.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_set_network) - APR_HDR_SIZE); + mvm_set_network.hdr.src_port = 0; + mvm_set_network.hdr.dest_port = mvm_handle; + mvm_set_network.hdr.token = 0; + mvm_set_network.hdr.opcode = VSS_ICOMMON_CMD_SET_NETWORK; + mvm_set_network.network.network_id = v->mvs_info.network_type; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_network); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_NETWORK\n", __func__, ret); + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* Set voice timing. */ + pr_debug("%s: Setting voice timing\n", __func__); + + mvm_set_voice_timing.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_set_voice_timing.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_set_voice_timing) - APR_HDR_SIZE); + mvm_set_voice_timing.hdr.src_port = 0; + mvm_set_voice_timing.hdr.dest_port = mvm_handle; + mvm_set_voice_timing.hdr.token = 0; + mvm_set_voice_timing.hdr.opcode = + VSS_ICOMMON_CMD_SET_VOICE_TIMING; + mvm_set_voice_timing.timing.mode = 0; + mvm_set_voice_timing.timing.enc_offset = 8000; + mvm_set_voice_timing.timing.dec_req_offset = 3300; + mvm_set_voice_timing.timing.dec_offset = 8300; + + v->mvm_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_voice_timing); + if (ret < 0) { + pr_aud_err("%s: Error %d sending SET_TIMING\n", __func__, ret); + goto fail; + } + + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + return 0; +fail: + return -EINVAL; +} + +static int voice_attach_vocproc(struct voice_data *v) +{ + int ret = 0; + struct mvm_attach_vocproc_cmd mvm_a_vocproc_cmd; + void *apr_mvm = voice_get_apr_mvm(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* send enable vocproc */ + voice_send_enable_vocproc_cmd(v); + + /* attach vocproc and wait for response */ + mvm_a_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_a_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_a_vocproc_cmd) - APR_HDR_SIZE); + pr_aud_info("send mvm_a_vocproc_cmd pkt size = %d\n", + mvm_a_vocproc_cmd.hdr.pkt_size); + mvm_a_vocproc_cmd.hdr.src_port = 0; + mvm_a_vocproc_cmd.hdr.dest_port = mvm_handle; + mvm_a_vocproc_cmd.hdr.token = 0; + mvm_a_vocproc_cmd.hdr.opcode = VSS_ISTREAM_CMD_ATTACH_VOCPROC; + mvm_a_vocproc_cmd.mvm_attach_cvp_handle.handle = cvp_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_a_vocproc_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_ISTREAM_CMD_ATTACH_VOCPROC\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* send tty mode if tty device is used */ + voice_send_tty_mode_to_modem(v); + + if (v->voc_path == VOC_PATH_FULL) + voice_send_netid_timing_cmd(v); + +#ifdef 
CONFIG_MSM8X60_RTAC + rtac_add_voice(v); +#endif + return 0; +fail: + return -EINVAL; +} + +static int voice_destroy_modem_voice(struct voice_data *v) +{ + struct mvm_detach_vocproc_cmd mvm_d_vocproc_cmd; + struct apr_hdr cvp_destroy_session_cmd; + int ret = 0; + void *apr_mvm = voice_get_apr_mvm(v); + void *apr_cvp = voice_get_apr_cvp(v); + u16 mvm_handle = voice_get_mvm_handle(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* detach VOCPROC and wait for response from mvm */ + mvm_d_vocproc_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + mvm_d_vocproc_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(mvm_d_vocproc_cmd) - APR_HDR_SIZE); + pr_aud_info("mvm_d_vocproc_cmd pkt size = %d\n", + mvm_d_vocproc_cmd.hdr.pkt_size); + mvm_d_vocproc_cmd.hdr.src_port = 0; + mvm_d_vocproc_cmd.hdr.dest_port = mvm_handle; + mvm_d_vocproc_cmd.hdr.token = 0; + mvm_d_vocproc_cmd.hdr.opcode = VSS_ISTREAM_CMD_DETACH_VOCPROC; + mvm_d_vocproc_cmd.mvm_detach_cvp_handle.handle = cvp_handle; + + v->mvm_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_d_vocproc_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending VSS_ISTREAM_CMD_DETACH_VOCPROC\n"); + goto fail; + } + ret = wait_event_timeout(v->mvm_wait, + (v->mvm_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + + /* destrop cvp session */ + cvp_destroy_session_cmd.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_destroy_session_cmd.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_destroy_session_cmd) - APR_HDR_SIZE); + pr_aud_info("cvp_destroy_session_cmd pkt size = %d\n", + cvp_destroy_session_cmd.pkt_size); + cvp_destroy_session_cmd.src_port = 0; + cvp_destroy_session_cmd.dest_port = cvp_handle; + cvp_destroy_session_cmd.token = 0; + cvp_destroy_session_cmd.opcode = APRV2_IBASIC_CMD_DESTROY_SESSION; + + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_destroy_session_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending APRV2_IBASIC_CMD_DESTROY_SESSION\n"); + goto fail; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + goto fail; + } + +#ifdef CONFIG_MSM8X60_RTAC + rtac_remove_voice(v); +#endif + cvp_handle = 0; + voice_set_cvp_handle(v, cvp_handle); + + return 0; + +fail: + return -EINVAL; +} + +static int voice_send_mute_cmd_to_modem(struct voice_data *v) +{ + struct cvs_set_mute_cmd cvs_mute_cmd; + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + + /* send mute/unmute to cvs */ + cvs_mute_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_mute_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_mute_cmd) - APR_HDR_SIZE); + cvs_mute_cmd.hdr.src_port = 0; + cvs_mute_cmd.hdr.dest_port = cvs_handle; + cvs_mute_cmd.hdr.token = 0; + cvs_mute_cmd.hdr.opcode = VSS_ISTREAM_CMD_SET_MUTE; + cvs_mute_cmd.cvs_set_mute.direction = 0; /*tx*/ + cvs_mute_cmd.cvs_set_mute.mute_flag = v->dev_tx.mute; + + pr_aud_info(" mute value =%d\n", cvs_mute_cmd.cvs_set_mute.mute_flag); + v->cvs_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_mute_cmd); + if (ret < 0) { + pr_aud_err("Fail: send STREAM SET MUTE\n"); + goto fail; + } + ret = 
wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) + pr_aud_err("%s: wait_event timeout\n", __func__); + +fail: + return 0; +} + +static int voice_send_vol_index_to_modem(struct voice_data *v) +{ + struct cvp_set_rx_volume_index_cmd cvp_vol_cmd; + int ret = 0; + void *apr_cvp = voice_get_apr_cvp(v); + u16 cvp_handle = voice_get_cvp_handle(v); + + /* send volume index to cvp */ + cvp_vol_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvp_vol_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvp_vol_cmd) - APR_HDR_SIZE); + cvp_vol_cmd.hdr.src_port = 0; + cvp_vol_cmd.hdr.dest_port = cvp_handle; + cvp_vol_cmd.hdr.token = 0; + cvp_vol_cmd.hdr.opcode = + VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX; + cvp_vol_cmd.cvp_set_vol_idx.vol_index = v->dev_rx.volume; + v->cvp_state = CMD_STATUS_FAIL; + ret = apr_send_pkt(apr_cvp, (uint32_t *) &cvp_vol_cmd); + if (ret < 0) { + pr_aud_err("Fail in sending RX VOL INDEX\n"); + return -EINVAL; + } + ret = wait_event_timeout(v->cvp_wait, + (v->cvp_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + return -EINVAL; + } + return 0; +} + +static int voice_cvs_start_record(struct voice_data *v, uint32_t rec_mode) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct cvs_start_record_cmd cvs_start_record; + + pr_debug("%s: Start record %d\n", __func__, rec_mode); + + cvs_start_record.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_start_record.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_start_record) - APR_HDR_SIZE); + cvs_start_record.hdr.src_port = 0; + cvs_start_record.hdr.dest_port = cvs_handle; + cvs_start_record.hdr.token = 0; + cvs_start_record.hdr.opcode = VSS_ISTREAM_CMD_START_RECORD; + + if (rec_mode == VOC_REC_UPLINK) { + cvs_start_record.rec_mode.rx_tap_point = VSS_TAP_POINT_NONE; + cvs_start_record.rec_mode.tx_tap_point = + VSS_TAP_POINT_STREAM_END; + } else if (rec_mode == VOC_REC_DOWNLINK) { + cvs_start_record.rec_mode.rx_tap_point = + VSS_TAP_POINT_STREAM_END; + cvs_start_record.rec_mode.tx_tap_point = VSS_TAP_POINT_NONE; + } else if (rec_mode == VOC_REC_BOTH) { + cvs_start_record.rec_mode.rx_tap_point = + VSS_TAP_POINT_STREAM_END; + cvs_start_record.rec_mode.tx_tap_point = + VSS_TAP_POINT_STREAM_END; + } else { + pr_aud_err("%s: Invalid in-call rec_mode %d\n", __func__, rec_mode); + + ret = -EINVAL; + goto fail; + } + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_record); + if (ret < 0) { + pr_aud_err("%s: Error %d sending START_RECORD\n", __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + return 0; + +fail: + return ret; +} + +static int voice_cvs_stop_record(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_stop_record; + + pr_debug("%s: Stop record\n", __func__); + + cvs_stop_record.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_stop_record.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_stop_record) - APR_HDR_SIZE); + cvs_stop_record.src_port = 0; + 
cvs_stop_record.dest_port = cvs_handle; + cvs_stop_record.token = 0; + cvs_stop_record.opcode = VSS_ISTREAM_CMD_STOP_RECORD; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_record); + if (ret < 0) { + pr_aud_err("%s: Error %d sending STOP_RECORD\n", __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + return 0; + +fail: + return ret; +} + +int voice_start_record(uint32_t rec_mode, uint32_t set) +{ + int ret = 0; + u16 cvs_handle; + + pr_debug("%s: rec_mode %d, set %d\n", __func__, rec_mode, set); + + mutex_lock(&voice.lock); + + cvs_handle = voice_get_cvs_handle(&voice); + + if (cvs_handle != 0) { + if (set) + ret = voice_cvs_start_record(&voice, rec_mode); + else + ret = voice_cvs_stop_record(&voice); + } else { + /* Cache the value for later. */ + voice.rec_info.pending = set; + voice.rec_info.rec_mode = rec_mode; + } + + mutex_unlock(&voice.lock); + + return ret; +} + +static int voice_cvs_start_playback(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_start_playback; + + pr_debug("%s: Start playback\n", __func__); + + cvs_start_playback.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_start_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_start_playback) - APR_HDR_SIZE); + cvs_start_playback.src_port = 0; + cvs_start_playback.dest_port = cvs_handle; + cvs_start_playback.token = 0; + cvs_start_playback.opcode = VSS_ISTREAM_CMD_START_PLAYBACK; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_start_playback); + if (ret < 0) { + pr_aud_err("%s: Error %d sending START_PLAYBACK\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + v->music_info.playing = 1; + + return 0; + +fail: + return ret; +} + +static int voice_cvs_stop_playback(struct voice_data *v) +{ + int ret = 0; + void *apr_cvs = voice_get_apr_cvs(v); + u16 cvs_handle = voice_get_cvs_handle(v); + struct apr_hdr cvs_stop_playback; + + pr_debug("%s: Stop playback\n", __func__); + + if (v->music_info.playing) { + cvs_stop_playback.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + cvs_stop_playback.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(cvs_stop_playback) - APR_HDR_SIZE); + cvs_stop_playback.src_port = 0; + cvs_stop_playback.dest_port = cvs_handle; + cvs_stop_playback.token = 0; + + cvs_stop_playback.opcode = VSS_ISTREAM_CMD_STOP_PLAYBACK; + + v->cvs_state = CMD_STATUS_FAIL; + + ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_stop_playback); + if (ret < 0) { + pr_aud_err("%s: Error %d sending STOP_PLAYBACK\n", + __func__, ret); + + goto fail; + } + + ret = wait_event_timeout(v->cvs_wait, + (v->cvs_state == CMD_STATUS_SUCCESS), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + pr_aud_err("%s: wait_event timeout\n", __func__); + + goto fail; + } + + v->music_info.playing = 0; + } else { + pr_aud_err("%s: Stop playback already sent\n", __func__); + } + + return 0; + +fail: + return ret; +} + +int voice_start_playback(uint32_t set) +{ + int ret = 0; + u16 cvs_handle; + + pr_debug("%s: 
Start playback %d\n", __func__, set); + + mutex_lock(&voice.lock); + + cvs_handle = voice_get_cvs_handle(&voice); + + if (cvs_handle != 0) { + if (set) + ret = voice_cvs_start_playback(&voice); + else + ret = voice_cvs_stop_playback(&voice); + } else { + /* Cache the value for later. */ + pr_debug("%s: Caching ICP value", __func__); + + voice.music_info.pending = set; + } + + mutex_unlock(&voice.lock); + + return ret; +} + +static void voice_auddev_cb_function(u32 evt_id, + union auddev_evt_data *evt_payload, + void *private_data) +{ + struct voice_data *v = &voice; + struct sidetone_cal sidetone_cal_data; + int rc = 0; + pr_aud_info("auddev_cb_function, evt_id=%d,\n", evt_id); + if ((evt_id != AUDDEV_EVT_START_VOICE) && + (evt_id != AUDDEV_EVT_END_VOICE)) { + if (evt_payload == NULL) { + pr_aud_err(" evt_payload is NULL pointer\n"); + return; + } + } + + switch (evt_id) { + case AUDDEV_EVT_START_VOICE: + mutex_lock(&v->lock); + + if ((v->voc_state == VOC_INIT) || + (v->voc_state == VOC_RELEASE)) { + v->v_call_status = VOICE_CALL_START; + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) + && (v->dev_tx.enabled == VOICE_DEV_ENABLED)) { + rc = voice_apr_register(v); + if (rc < 0) { + mutex_unlock(&v->lock); + pr_aud_err("%s: voice apr registration " + "failed\n", __func__); + return; + } + voice_create_mvm_cvs_session(v); + voice_setup_modem_voice(v); + voice_attach_vocproc(v); + voice_send_start_voice_cmd(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + + /* Start in-call recording if command was + * pending. */ + if (v->rec_info.pending) { + voice_cvs_start_record(v, + v->rec_info.rec_mode); + + v->rec_info.pending = 0; + } + + /* Start in-call music delivery if command was + * pending.
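 * voice_start_playback() caches the request when no CVS handle exists yet; it is serviced here once the stream session is up.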
*/ + if (v->music_info.pending) { + voice_cvs_start_playback(v); + + v->music_info.pending = 0; + } + } + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEV_CHG_VOICE: + if (v->dev_rx.enabled == VOICE_DEV_ENABLED) + msm_snddev_enable_sidetone(v->dev_rx.dev_id, 0, 0); + v->dev_rx.enabled = VOICE_DEV_DISABLED; + v->dev_tx.enabled = VOICE_DEV_DISABLED; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + /* send cmd to modem to do voice device change */ + voice_disable_vocproc(v); + v->voc_state = VOC_CHANGE; + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEV_RDY: + mutex_lock(&v->lock); + + if (v->voc_state == VOC_CHANGE) { + /* get port Ids */ + if (evt_payload->voc_devinfo.dev_type == DIR_RX) { + v->dev_rx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_rx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_rx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_rx.enabled = VOICE_DEV_ENABLED; + } else { + v->dev_tx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_tx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_tx.enabled = VOICE_DEV_ENABLED; + v->dev_tx.dev_id = + evt_payload->voc_devinfo.dev_id; + } + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) && + (v->dev_tx.enabled == VOICE_DEV_ENABLED)) { + voice_set_device(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + } + } else if ((v->voc_state == VOC_INIT) || + (v->voc_state == VOC_RELEASE)) { + /* get AFE ports */ + if (evt_payload->voc_devinfo.dev_type == DIR_RX) { + /* get rx port id */ + v->dev_rx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_rx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_rx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_rx.enabled = VOICE_DEV_ENABLED; + } else { + /* get tx port id */ + v->dev_tx.dev_port_id = + evt_payload->voc_devinfo.dev_port_id; + v->dev_tx.sample = + evt_payload->voc_devinfo.dev_sample; + v->dev_tx.dev_id = + evt_payload->voc_devinfo.dev_id; + v->dev_tx.enabled = VOICE_DEV_ENABLED; + } + if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) && + (v->dev_tx.enabled == VOICE_DEV_ENABLED) && + (v->v_call_status == VOICE_CALL_START)) { + rc = voice_apr_register(v); + if (rc < 0) { + mutex_unlock(&v->lock); + pr_aud_err("%s: voice apr registration" + "failed\n", __func__); + return; + } + voice_create_mvm_cvs_session(v); + voice_setup_modem_voice(v); + voice_attach_vocproc(v); + voice_send_start_voice_cmd(v); + get_sidetone_cal(&sidetone_cal_data); + msm_snddev_enable_sidetone( + v->dev_rx.dev_id, + sidetone_cal_data.enable, + sidetone_cal_data.gain); + v->voc_state = VOC_RUN; + + /* Start in-call recording if command was + * pending. */ + if (v->rec_info.pending) { + voice_cvs_start_record(v, + v->rec_info.rec_mode); + + v->rec_info.pending = 0; + } + + /* Start in-call music delivery if command was + * pending. 
*/ + if (v->music_info.pending) { + voice_cvs_start_playback(v); + + v->music_info.pending = 0; + } + } + } + + mutex_unlock(&v->lock); + break; + case AUDDEV_EVT_DEVICE_VOL_MUTE_CHG: + /* cache the mute and volume index value */ + if (evt_payload->voc_devinfo.dev_type == DIR_TX) { + v->dev_tx.mute = + evt_payload->voc_vm_info.dev_vm_val.mute; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) + voice_send_mute_cmd_to_modem(v); + + mutex_unlock(&v->lock); + } else { + v->dev_rx.volume = evt_payload-> + voc_vm_info.dev_vm_val.vol; + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) + voice_send_vol_index_to_modem(v); + + mutex_unlock(&v->lock); + } + break; + case AUDDEV_EVT_REL_PENDING: + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + voice_disable_vocproc(v); + v->voc_state = VOC_CHANGE; + } + + mutex_unlock(&v->lock); + + if (evt_payload->voc_devinfo.dev_type == DIR_RX) + v->dev_rx.enabled = VOICE_DEV_DISABLED; + else + v->dev_tx.enabled = VOICE_DEV_DISABLED; + + break; + case AUDDEV_EVT_END_VOICE: + /* recover the tx mute and rx volume to the default values */ + v->dev_tx.mute = v->default_mute_val; + v->dev_rx.volume = v->default_vol_val; + if (v->dev_rx.enabled == VOICE_DEV_ENABLED) + msm_snddev_enable_sidetone(v->dev_rx.dev_id, 0, 0); + + mutex_lock(&v->lock); + + if (v->voc_state == VOC_RUN) { + /* call stop modem voice */ + voice_send_stop_voice_cmd(v); + voice_destroy_modem_voice(v); + voice_destroy_mvm_cvs_session(v); + v->voc_state = VOC_RELEASE; + } else if (v->voc_state == VOC_CHANGE) { + voice_send_stop_voice_cmd(v); + voice_destroy_mvm_cvs_session(v); + v->voc_state = VOC_RELEASE; + } + + mutex_unlock(&v->lock); + + v->v_call_status = VOICE_CALL_END; + + break; + default: + pr_aud_err("UNKNOWN EVENT\n"); + } + return; +} +EXPORT_SYMBOL(voice_auddev_cb_function); + +int voice_set_voc_path_full(uint32_t set) +{ + int rc = 0; + + pr_aud_info("%s: %d\n", __func__, set); + + mutex_lock(&voice.lock); + + if (voice.voc_state == VOC_INIT || voice.voc_state == VOC_RELEASE) { + if (set) + voice.voc_path = VOC_PATH_FULL; + else + voice.voc_path = VOC_PATH_PASSIVE; + } else { + pr_aud_err("%s: Invalid voc path set to %d, in state %d\n", + __func__, set, voice.voc_state); + + rc = -EPERM; + } + + mutex_unlock(&voice.lock); + + return rc; +} +EXPORT_SYMBOL(voice_set_voc_path_full); + +void voice_register_mvs_cb(ul_cb_fn ul_cb, + dl_cb_fn dl_cb, + void *private_data) +{ + voice.mvs_info.ul_cb = ul_cb; + voice.mvs_info.dl_cb = dl_cb; + voice.mvs_info.private_data = private_data; +} + +void voice_config_vocoder(uint32_t media_type, + uint32_t rate, + uint32_t network_type, + uint32_t dtx_mode) +{ + voice.mvs_info.media_type = media_type; + voice.mvs_info.rate = rate; + voice.mvs_info.network_type = network_type; + voice.mvs_info.dtx_mode = dtx_mode; +} + +static int32_t modem_mvm_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service MVM\n", + __func__); + apr_reset(v->apr_mvm); + apr_reset(v->apr_q6_mvm); + v->apr_q6_mvm = NULL; + v->apr_mvm = NULL; + v->mvm_handle = 0; + v->mvm_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_debug("%x %x\n", ptr[0], ptr[1]); + /* ping mvm service ACK */ + + if (ptr[0] == + 
VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION || + ptr[0] == + VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION) { + /* Passive session is used for voice call + * through modem. Full session is used for voice + * call through Q6. */ + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + if (!ptr[1]) { + pr_debug("%s: MVM handle is %d\n", + __func__, data->src_port); + + voice_set_mvm_handle(v, data->src_port); + } else + pr_aud_info("got NACK for sending \ + MVM create session \n"); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_START_VOICE) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_ATTACH_VOCPROC) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_STOP_VOICE) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_DETACH_VOCPROC) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_TTY_MODE) { + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + pr_aud_info("%s: DESTROY resp\n", __func__); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_ATTACH_STREAM) { + pr_aud_info("%s: ATTACH_STREAM resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_IMVM_CMD_DETACH_STREAM) { + pr_debug("%s: DETACH_STREAM resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ICOMMON_CMD_SET_NETWORK) { + pr_debug("%s: SET_NETWORK resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else if (ptr[0] == VSS_ICOMMON_CMD_SET_VOICE_TIMING) { + pr_debug("%s: SET_VOICE_TIMING resp 0x%x\n", + __func__, ptr[1]); + + v->mvm_state = CMD_STATUS_SUCCESS; + wake_up(&v->mvm_wait); + } else + pr_debug("%s: not match cmd = 0x%x\n", + __func__, ptr[0]); + } + } + + return 0; +} + +static int32_t modem_cvs_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service CVS\n", + __func__); + apr_reset(v->apr_cvs); + apr_reset(v->apr_q6_cvs); + v->apr_q6_cvs = NULL; + v->apr_cvs = NULL; + v->cvs_handle = 0; + v->cvs_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_aud_info("%x %x\n", ptr[0], ptr[1]); + /*response from modem CVS */ + if (ptr[0] == + VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION || + ptr[0] == + VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION) { + if (!ptr[1]) { + pr_debug("%s: CVS handle is %d\n", + __func__, data->src_port); + voice_set_cvs_handle(v, data->src_port); + } else + pr_aud_info("got NACK for sending \ + CVS create session \n"); + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA) { + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_SET_MUTE) { + v->cvs_state = CMD_STATUS_SUCCESS; + 
wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_MEDIA_TYPE) { + pr_debug("%s: SET_MEDIA resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE) { + pr_debug("%s: SET_AMR_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE) { + pr_debug("%s: SET_AMR_WB_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_SET_ENC_DTX_MODE) { + pr_debug("%s: SET_DTX resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == + VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE) { + pr_debug("%s: SET_CDMA_RATE resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + pr_debug("%s: DESTROY resp\n", __func__); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_START_RECORD) { + pr_debug("%s: START_RECORD resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_STOP_RECORD) { + pr_debug("%s: STOP_RECORD resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); +#ifdef CONFIG_MSM8X60_RTAC + } else if (ptr[0] == VOICE_CMD_SET_PARAM) { + rtac_make_voice_callback(RTAC_CVS, ptr, + data->payload_size); +#endif + } else if (ptr[0] == VSS_ISTREAM_CMD_START_PLAYBACK) { + pr_debug("%s: START_PLAYBACK resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else if (ptr[0] == VSS_ISTREAM_CMD_STOP_PLAYBACK) { + pr_debug("%s: STOP_PLAYBACK resp 0x%x\n", + __func__, ptr[1]); + + v->cvs_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvs_wait); + } else + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + } + } else if (data->opcode == VSS_ISTREAM_EVT_SEND_ENC_BUFFER) { + uint32_t *voc_pkt = data->payload; + uint32_t pkt_len = data->payload_size; + + if (voc_pkt != NULL && v->mvs_info.ul_cb != NULL) { + pr_debug("%s: Media type is 0x%x\n", + __func__, voc_pkt[0]); + + /* Remove media ID from payload. 
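The first 32-bit word of the encoder buffer holds the media ID; only the remaining pkt_len - 4 bytes are handed to the MVS uplink callback.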
*/ + voc_pkt++; + pkt_len = pkt_len - 4; + + v->mvs_info.ul_cb((uint8_t *)voc_pkt, + pkt_len, + v->mvs_info.private_data); + } else { + pr_aud_err("%s: voc_pkt is 0x%x ul_cb is 0x%x\n", + __func__, (unsigned int)voc_pkt, + (unsigned int) v->mvs_info.ul_cb); + } + } else if (data->opcode == VSS_ISTREAM_EVT_SEND_DEC_BUFFER) { + pr_debug("%s: Send dec buf resp\n", __func__); + } else if (data->opcode == VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER) { + struct cvs_send_dec_buf_cmd send_dec_buf; + int ret = 0; + uint32_t pkt_len = 0; + + if (v->mvs_info.dl_cb != NULL) { + send_dec_buf.dec_buf.media_id = v->mvs_info.media_type; + + v->mvs_info.dl_cb( + (uint8_t *)&send_dec_buf.dec_buf.packet_data, + &pkt_len, + v->mvs_info.private_data); + + send_dec_buf.hdr.hdr_field = + APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), + APR_PKT_VER); + send_dec_buf.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, + sizeof(send_dec_buf.dec_buf.media_id) + pkt_len); + send_dec_buf.hdr.src_port = 0; + send_dec_buf.hdr.dest_port = voice_get_cvs_handle(v); + send_dec_buf.hdr.token = 0; + send_dec_buf.hdr.opcode = + VSS_ISTREAM_EVT_SEND_DEC_BUFFER; + + ret = apr_send_pkt(voice_get_apr_cvs(v), + (uint32_t *) &send_dec_buf); + if (ret < 0) { + pr_aud_err("%s: Error %d sending DEC_BUF\n", + __func__, ret); + goto fail; + } + } else { + pr_aud_err("%s: ul_cb is NULL\n", __func__); + } +#ifdef CONFIG_MSM8X60_RTAC + } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { + rtac_make_voice_callback(RTAC_CVS, data->payload, + data->payload_size); +#endif + + } else { + pr_debug("%s: Unknown opcode 0x%x\n", __func__, data->opcode); + } + +fail: + return 0; +} + +static int32_t modem_cvp_callback(struct apr_client_data *data, void *priv) +{ + uint32_t *ptr; + struct voice_data *v = priv; + + pr_debug("%s\n", __func__); + pr_debug("%s: Payload Length = %d, opcode=%x\n", __func__, + data->payload_size, data->opcode); + + if (data->opcode == RESET_EVENTS) { + pr_debug("%s:Reset event received in Voice service CVP\n", + __func__); + apr_reset(v->apr_cvp); + apr_reset(v->apr_q6_cvp); + v->apr_q6_cvp = NULL; + v->apr_cvp = NULL; + v->cvp_handle = 0; + v->cvp_q6_handle = 0; + return 0; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + if (data->payload_size) { + ptr = data->payload; + + pr_aud_info("%x %x\n", ptr[0], ptr[1]); + /*response from modem CVP */ + if (ptr[0] == + VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + if (!ptr[1]) { + voice_set_cvp_handle(v, data->src_port); + pr_debug("cvphdl=%d\n", data->src_port); + } else + pr_aud_info("got NACK from CVP create \ + session response\n"); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA) { + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_SET_DEVICE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_ENABLE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == VSS_IVOCPROC_CMD_DISABLE) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == APRV2_IBASIC_CMD_DESTROY_SESSION) { + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); + } else if (ptr[0] == + VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE 
+ ) { + + pr_debug("%s: cmd = 0x%x\n", __func__, ptr[0]); + v->cvp_state = CMD_STATUS_SUCCESS; + wake_up(&v->cvp_wait); +#ifdef CONFIG_MSM8X60_RTAC + } else if (ptr[0] == VOICE_CMD_SET_PARAM) { + rtac_make_voice_callback(RTAC_CVP, ptr, + data->payload_size); +#endif + } else + pr_debug("%s: not match cmd = 0x%x\n", + __func__, ptr[0]); + } +#ifdef CONFIG_MSM8X60_RTAC + } else if (data->opcode == VOICE_EVT_GET_PARAM_ACK) { + rtac_make_voice_callback(RTAC_CVP, data->payload, + data->payload_size); +#endif + } + return 0; +} + + +static int __init voice_init(void) +{ + int rc = 0; + struct voice_data *v = &voice; + pr_aud_info("%s\n", __func__); /* Macro prints the file name and function */ + /* set default value */ + v->default_mute_val = 1; /* default is mute */ + v->default_vol_val = 0; + v->default_sample_val = 8000; + + /* initialize dev_rx and dev_tx */ + memset(&v->dev_tx, 0, sizeof(struct device_data)); + memset(&v->dev_rx, 0, sizeof(struct device_data)); + v->dev_rx.volume = v->default_vol_val; + v->dev_tx.mute = v->default_mute_val; + + v->voc_state = VOC_INIT; + v->voc_path = VOC_PATH_PASSIVE; + v->adsp_version = 0; + init_waitqueue_head(&v->mvm_wait); + init_waitqueue_head(&v->cvs_wait); + init_waitqueue_head(&v->cvp_wait); + + mutex_init(&v->lock); + + v->mvm_handle = 0; + v->cvs_handle = 0; + v->cvp_handle = 0; + + v->mvm_q6_handle = 0; + v->cvs_q6_handle = 0; + v->cvp_q6_handle = 0; + + v->apr_mvm = NULL; + v->apr_cvs = NULL; + v->apr_cvp = NULL; + + v->apr_q6_mvm = NULL; + v->apr_q6_cvs = NULL; + v->apr_q6_cvp = NULL; + + /* Initialize MVS info. */ + memset(&v->mvs_info, 0, sizeof(v->mvs_info)); + v->mvs_info.network_type = VSS_NETWORK_ID_DEFAULT; + + v->rec_info.pending = 0; + v->rec_info.rec_mode = VOC_REC_NONE; + + memset(&v->music_info, 0, sizeof(v->music_info)); + + v->device_events = AUDDEV_EVT_DEV_CHG_VOICE | + AUDDEV_EVT_DEV_RDY | + AUDDEV_EVT_REL_PENDING | + AUDDEV_EVT_START_VOICE | + AUDDEV_EVT_END_VOICE | + AUDDEV_EVT_DEVICE_VOL_MUTE_CHG | + AUDDEV_EVT_FREQ_CHG; + + pr_debug("to register call back\n"); + /* register callback to auddev */ + auddev_register_evt_listner(v->device_events, AUDDEV_CLNT_VOC, + 0, voice_auddev_cb_function, v); + + return rc; +} + +device_initcall(voice_init); diff --git a/arch/arm/mach-msm/qdsp6v3/qcelp_in.c b/arch/arm/mach-msm/qdsp6v3/qcelp_in.c new file mode 100644 index 00000000000..f1986cef7ad --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/qcelp_in.c @@ -0,0 +1,334 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((35+sizeof(struct meta_out_dsp)) * 10)) + +void q6asm_qcelp_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in * audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode - %d\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_CMDRSP_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM: + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS: + break; + case ASM_SESSION_EVENT_TX_OVERFLOW: + pr_aud_err("%s:session id %d:ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + default: + pr_aud_err("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} +/* ------------------- device --------------------- */ +static long qcelp_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_aud_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* reduced_rate_level, rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_qcelp(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0, 0); + + if (rc < 0) { + pr_aud_err("%s:session id %d: cmd qcelp media format block\ + failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_aud_err("%s:session id %d: media format block\ + failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__, + audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_aud_err("%s:session id %d: Audio Start procedure failed\ + rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_aud_err("%s:session id %d: Audio Stop procedure failed\ + rc=%d\n", 
__func__, audio->ac->session, + rc); + break; + } + break; + } + case AUDIO_GET_QCELP_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_qcelp_enc_config))) + rc = -EFAULT; + break; + } + case AUDIO_SET_QCELP_ENC_CONFIG: { + struct msm_audio_qcelp_enc_config cfg; + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_qcelp_enc_config))) { + rc = -EFAULT; + break; + } + + if (cfg.min_bit_rate > 4 || + cfg.min_bit_rate < 1) { + pr_aud_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg.max_bit_rate > 4 || + cfg.max_bit_rate < 1) { + pr_aud_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->min_bit_rate = cfg.min_bit_rate; + enc_cfg->max_bit_rate = cfg.max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x\ + max_bit_rate=0x%x\n", __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + rc = -EINVAL; + } + return rc; +} + +static int qcelp_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_qcelp_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_aud_err("%s: Could not allocate memory for qcelp\ + driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_qcelp_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_aud_err("%s: Could not allocate memory for qcelp\ + config param\n", __func__); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-configured at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 35; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_qcelp_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_aud_err("%s: Could not allocate memory for audio\ + client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open qcelp encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_V13K, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_aud_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = 
q6asm_open_read(audio->ac, FORMAT_V13K); + if (rc < 0) { + pr_aud_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_aud_err("%s:session id %d: TX Overflow registration\ + failed rc=%d\n", __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_aud_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_aud_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_ioctl = qcelp_in_ioctl; + file->private_data = audio; + + pr_aud_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = qcelp_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +}; + +struct miscdevice audio_qcelp_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_qcelp_in", + .fops = &audio_in_fops, +}; + +static int __init qcelp_in_init(void) +{ + return misc_register(&audio_qcelp_in_misc); +} + +device_initcall(qcelp_in_init); diff --git a/arch/arm/mach-msm/qdsp6v3/rtac.h b/arch/arm/mach-msm/qdsp6v3/rtac.h new file mode 100644 index 00000000000..fd7f85f0e51 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/rtac.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __RTAC_H__ +#define __RTAC_H__ + +#ifdef CONFIG_MSM8X60_RTAC + +#include +#include + +/* Voice Modes */ +#define RTAC_CVP 0 +#define RTAC_CVS 1 +#define RTAC_VOICE_MODES 2 + +void update_rtac(u32 evt_id, u32 dev_id, struct msm_snddev_info *dev_info); +void rtac_add_adm_device(u32 port_id, u32 popp_id); +void rtac_remove_adm_device(u32 port_id); +void rtac_add_voice(struct voice_data *v); +void rtac_remove_voice(struct voice_data *v); +void rtac_set_adm_handle(void *handle); +bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size); +void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size); +void rtac_set_asm_handle(u32 session_id, void *handle); +bool rtac_make_asm_callback(u32 session_id, uint32_t *payload, + u32 payload_size); +void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size); +void rtac_set_voice_handle(u32 mode, void *handle); +bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size); +void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size); + +#endif + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c b/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c new file mode 100644 index 00000000000..fc944ef75d3 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_ecodec.c @@ -0,0 +1,390 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ECODEC_SAMPLE_RATE 8000 +static struct q6v2audio_ecodec_ops default_audio_ops; +static struct q6v2audio_ecodec_ops *audio_ops = &default_audio_ops; + +/* Context for each external codec device */ +struct snddev_ecodec_state { + struct snddev_ecodec_data *data; + u32 sample_rate; +}; + +/* Global state for the driver */ +struct snddev_ecodec_drv_state { + struct mutex dev_lock; + int ref_cnt; /* ensure one rx device at a time */ + struct clk *ecodec_clk; +}; + +static struct snddev_ecodec_drv_state snddev_ecodec_drv; + +struct aux_pcm_state { + unsigned int dout; + unsigned int din; + unsigned int syncout; + unsigned int clkin_a; +}; + +static struct aux_pcm_state the_aux_pcm_state; + +static int aux_pcm_gpios_request(void) +{ + int rc = 0; + uint32_t bt_config_gpio[] = { + GPIO_CFG(the_aux_pcm_state.dout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.din, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.syncout, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.clkin_a, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + }; + pr_debug("%s\n", __func__); + gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_ENABLE); + gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_ENABLE); + + return rc; +} + +static void aux_pcm_gpios_free(void) +{ + uint32_t bt_config_gpio[] = { + GPIO_CFG(the_aux_pcm_state.dout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.din, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.syncout, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + GPIO_CFG(the_aux_pcm_state.clkin_a, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), + }; + pr_debug("%s\n", __func__); + gpio_tlmm_config(bt_config_gpio[0], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[1], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[2], GPIO_CFG_DISABLE); + gpio_tlmm_config(bt_config_gpio[3], GPIO_CFG_DISABLE); +} + +static int get_aux_pcm_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res; + + /* Claim all of the GPIOs. 
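Each AUX PCM pin (dout, din, syncout, clkin_a) is passed in as a named IORESOURCE_IO resource on the platform device.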
*/ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_dout"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM DOUT\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.dout = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_din"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM DIN\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.din = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "aux_pcm_syncout"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM SYNC OUT\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.syncout = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "aux_pcm_clkin_a"); + if (!res) { + pr_aud_err("%s: failed to get gpio AUX PCM CLKIN A\n", __func__); + return -ENODEV; + } + + the_aux_pcm_state.clkin_a = res->start; + + pr_aud_info("%s: dout = %u, din = %u , syncout = %u, clkin_a =%u\n", + __func__, the_aux_pcm_state.dout, the_aux_pcm_state.din, + the_aux_pcm_state.syncout, the_aux_pcm_state.clkin_a); + + return rc; +} + +static int aux_pcm_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = get_aux_pcm_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return -ENODEV; + } + return rc; +} + +static struct platform_driver aux_pcm_driver = { + .probe = aux_pcm_probe, + .driver = { .name = "msm_aux_pcm"} +}; + +static int snddev_ecodec_open(struct msm_snddev_info *dev_info) +{ + int rc; + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + union afe_port_config afe_config; + + pr_debug("%s\n", __func__); + + mutex_lock(&drv->dev_lock); + + if (dev_info->opened) { + pr_aud_err("%s: ERROR: %s already opened\n", __func__, + dev_info->name); + mutex_unlock(&drv->dev_lock); + return -EBUSY; + } + + if (drv->ref_cnt != 0) { + pr_debug("%s: opened %s\n", __func__, dev_info->name); + drv->ref_cnt++; + mutex_unlock(&drv->dev_lock); + return 0; + } + + pr_aud_info("%s: opening %s\n", __func__, dev_info->name); + + rc = aux_pcm_gpios_request(); + if (rc < 0) { + pr_aud_err("%s: GPIO request failed\n", __func__); + return rc; + } + + clk_reset(drv->ecodec_clk, CLK_RESET_ASSERT); + + afe_config.pcm.mode = AFE_PCM_CFG_MODE_PCM; + afe_config.pcm.sync = AFE_PCM_CFG_SYNC_INT; + afe_config.pcm.frame = AFE_PCM_CFG_FRM_256BPF; + afe_config.pcm.quant = AFE_PCM_CFG_QUANT_LINEAR_NOPAD; + afe_config.pcm.slot = 0; + afe_config.pcm.data = AFE_PCM_CFG_CDATAOE_MASTER; + + rc = afe_open(PCM_RX, &afe_config, ECODEC_SAMPLE_RATE); + if (rc < 0) { + pr_aud_err("%s: afe open failed for PCM_RX\n", __func__); + goto err_rx_afe; + } + + rc = afe_open(PCM_TX, &afe_config, ECODEC_SAMPLE_RATE); + if (rc < 0) { + pr_aud_err("%s: afe open failed for PCM_TX\n", __func__); + goto err_tx_afe; + } + + rc = clk_set_rate(drv->ecodec_clk, 2048000); + if (rc < 0) { + pr_aud_err("%s: clk_set_rate failed\n", __func__); + goto err_clk; + } + + clk_enable(drv->ecodec_clk); + + clk_reset(drv->ecodec_clk, CLK_RESET_DEASSERT); + + drv->ref_cnt++; + mutex_unlock(&drv->dev_lock); + + return 0; + +err_clk: + afe_close(PCM_TX); +err_tx_afe: + afe_close(PCM_RX); +err_rx_afe: + aux_pcm_gpios_free(); + mutex_unlock(&drv->dev_lock); + return -ENODEV; +} + +int snddev_ecodec_close(struct msm_snddev_info *dev_info) +{ + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + + pr_debug("%s: closing %s\n", __func__, dev_info->name); + + mutex_lock(&drv->dev_lock); + + if (!dev_info->opened) { + 
pr_aud_err("%s: ERROR: %s is not opened\n", __func__, + dev_info->name); + mutex_unlock(&drv->dev_lock); + return -EPERM; + } + + drv->ref_cnt--; + + if (drv->ref_cnt == 0) { + + pr_aud_info("%s: closing all devices\n", __func__); + + clk_disable(drv->ecodec_clk); + aux_pcm_gpios_free(); + + afe_close(PCM_RX); + afe_close(PCM_TX); + } + + mutex_unlock(&drv->dev_lock); + + return 0; +} + +int snddev_ecodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc = 0; + + if (!dev_info) { + rc = -EINVAL; + goto error; + } + return ECODEC_SAMPLE_RATE; + +error: + return rc; +} + +void htc_8x60_register_ecodec_ops(struct q6v2audio_ecodec_ops *ops) +{ + audio_ops = ops; +} + +static int snddev_ecodec_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_ecodec_data *pdata; + struct msm_snddev_info *dev_info; + struct snddev_ecodec_state *ecodec; + + pr_aud_info("%s:\n", __func__); + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + rc = -1; + goto error; + } + pdata = pdev->dev.platform_data; + + ecodec = kzalloc(sizeof(struct snddev_ecodec_state), GFP_KERNEL); + if (!ecodec) { + rc = -ENOMEM; + goto error; + } + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + kfree(ecodec); + rc = -ENOMEM; + goto error; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *)ecodec; + dev_info->dev_ops.open = snddev_ecodec_open; + dev_info->dev_ops.close = snddev_ecodec_close; + dev_info->dev_ops.set_freq = snddev_ecodec_set_freq; + dev_info->dev_ops.enable_sidetone = NULL; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + + msm_snddev_register(dev_info); + + ecodec->data = pdata; + ecodec->sample_rate = ECODEC_SAMPLE_RATE; /* Default to 8KHz */ +error: + return rc; +} + +struct platform_driver snddev_ecodec_driver = { + .probe = snddev_ecodec_probe, + .driver = {.name = "msm_snddev_ecodec"} +}; + +int __init snddev_ecodec_init(void) +{ + int rc = 0; + struct snddev_ecodec_drv_state *drv = &snddev_ecodec_drv; + + pr_aud_info("%s:\n", __func__); + + mutex_init(&drv->dev_lock); + drv->ref_cnt = 0; + + drv->ecodec_clk = clk_get(NULL, "pcm_clk"); + if (IS_ERR(drv->ecodec_clk)) { + pr_aud_err("%s: could not get pcm_clk\n", __func__); + return PTR_ERR(drv->ecodec_clk); + } + + rc = platform_driver_register(&aux_pcm_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for aux pcm failed\n", + __func__); + goto error_aux_pcm_platform_driver; + } + + rc = platform_driver_register(&snddev_ecodec_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for ecodec failed\n", + __func__); + goto error_ecodec_platform_driver; + } + pr_aud_info("%s: done\n", __func__); + + return 0; + +error_ecodec_platform_driver: + platform_driver_unregister(&aux_pcm_driver); +error_aux_pcm_platform_driver: + clk_put(drv->ecodec_clk); + + pr_aud_err("%s: encounter error\n", __func__); + return -ENODEV; +} + +device_initcall(snddev_ecodec_init); + +MODULE_DESCRIPTION("ECodec Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c b/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c new file mode 100644 index 00000000000..9e661c8b1db --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_hdmi.c @@ -0,0 +1,182 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_MUTEX(snddev_hdmi_lock); +static int snddev_hdmi_active; + +static int snddev_hdmi_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + union afe_port_config afe_config; + struct snddev_hdmi_data *snddev_hdmi_data; + + if (!dev_info) { + pr_aud_err("msm_snddev_info is null\n"); + return -EINVAL; + } + + snddev_hdmi_data = dev_info->private_data; + + mutex_lock(&snddev_hdmi_lock); + + if (snddev_hdmi_active) { + pr_aud_err("HDMI snddev already active\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EBUSY; + } + afe_config.hdmi.channel_mode = snddev_hdmi_data->channel_mode; + afe_config.hdmi.bitwidth = 16; + afe_config.hdmi.data_type = 0; + rc = afe_open(snddev_hdmi_data->copp_id, &afe_config, + dev_info->sample_rate); + + if (rc < 0) { + pr_aud_err("afe_open failed\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EINVAL; + } + snddev_hdmi_active = 1; + + pr_debug("%s open done\n", dev_info->name); + + mutex_unlock(&snddev_hdmi_lock); + + return 0; +} + +static int snddev_hdmi_close(struct msm_snddev_info *dev_info) +{ + if (!dev_info) { + pr_aud_err("msm_snddev_info is null\n"); + return -EINVAL; + } + + if (!dev_info->opened) { + pr_aud_err("calling close device with out opening the" + " device\n"); + return -EPERM; + } + mutex_lock(&snddev_hdmi_lock); + + if (!snddev_hdmi_active) { + pr_aud_err("HDMI snddev not active\n"); + mutex_unlock(&snddev_hdmi_lock); + return -EPERM; + } + snddev_hdmi_active = 0; + + afe_close(HDMI_RX); + + pr_debug("%s closed\n", dev_info->name); + mutex_unlock(&snddev_hdmi_lock); + + return 0; +} + +static int snddev_hdmi_set_freq(struct msm_snddev_info *dev_info, u32 req_freq) +{ + if (req_freq != 48000) { + pr_debug("Unsupported Frequency:%d\n", req_freq); + return -EINVAL; + } + return 48000; +} + +static int snddev_hdmi_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_hdmi_data *pdata; + struct msm_snddev_info *dev_info; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + return -ENODEV; + } + + pdata = pdev->dev.platform_data; + if (!(pdata->capability & SNDDEV_CAP_RX)) { + pr_aud_err("invalid device data either RX or TX\n"); + return -ENODEV; + } + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("unable to allocate memeory for msm_snddev_info\n"); + return -ENOMEM; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->acdb_id = pdata->acdb_id; + dev_info->private_data = (void *)pdata; + dev_info->dev_ops.open = snddev_hdmi_open; + dev_info->dev_ops.close = snddev_hdmi_close; + dev_info->dev_ops.set_freq = snddev_hdmi_set_freq; + dev_info->capability = pdata->capability; + 
dev_info->opened = 0; + msm_snddev_register(dev_info); + dev_info->sample_rate = pdata->default_sample_rate; + + pr_debug("probe done for %s\n", pdata->name); + return rc; +} + +static struct platform_driver snddev_hdmi_driver = { + .probe = snddev_hdmi_probe, + .driver = {.name = "snddev_hdmi"} +}; + +static int __init snddev_hdmi_init(void) +{ + s32 rc; + + rc = platform_driver_register(&snddev_hdmi_driver); + if (IS_ERR_VALUE(rc)) { + + pr_aud_err("platform_driver_register failed.\n"); + goto error_platform_driver; + } + + pr_debug("snddev_hdmi_init : done\n"); + + return 0; + +error_platform_driver: + + pr_aud_err("encounterd error\n"); + return -ENODEV; +} + +module_init(snddev_hdmi_init); + +MODULE_DESCRIPTION("HDMI Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c b/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c new file mode 100644 index 00000000000..f23797cf3ba --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_icodec.c @@ -0,0 +1,1189 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "audio_acdb.h" + +#define SNDDEV_ICODEC_PCM_SZ 32 /* 16 bit / sample stereo mode */ +#define SNDDEV_ICODEC_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */ +#define SNDDEV_ICODEC_CLK_RATE(freq) \ + (((freq) * (SNDDEV_ICODEC_PCM_SZ)) << (SNDDEV_ICODEC_MUL_FACTOR)) +#define SNDDEV_LOW_POWER_MODE 0 +#define SNDDEV_HIGH_POWER_MODE 1 +/* Voltage required for S4 in microVolts, 2.2V or 2200000microvolts */ +#define SNDDEV_VREG_8058_S4_VOLTAGE (2200000) +/* Load Current required for S4 in microAmps, + 36mA - 56mA */ +#define SNDDEV_VREG_LOW_POWER_LOAD (36000) +#define SNDDEV_VREG_HIGH_POWER_LOAD (56000) + +static struct q6v2audio_icodec_ops default_audio_ops; +static struct q6v2audio_icodec_ops *audio_ops = &default_audio_ops; +static int support_aic3254; +static int support_adie; +static int support_aic3254_use_mclk; +static int aic3254_use_mclk_counter; +int msm_codec_i2s_slave_mode = 1; +static struct q6v2audio_aic3254_ops default_aic3254_ops; +static struct q6v2audio_aic3254_ops *aic3254_ops = &default_aic3254_ops; + +/* Global state for the driver */ +struct snddev_icodec_drv_state { + struct mutex rx_lock; + struct mutex tx_lock; + u32 rx_active; /* ensure one rx device at a time */ + u32 tx_active; /* ensure one tx device at a time */ + struct clk *rx_osrclk; + struct clk *rx_bitclk; + struct clk *tx_osrclk; + struct clk *tx_bitclk; + + struct wake_lock rx_idlelock; + struct wake_lock tx_idlelock; + + /* handle to pmic8058 regulator smps4 */ + struct regulator *snddev_vreg; + + struct mutex rx_mclk_lock; +}; + +static struct snddev_icodec_drv_state snddev_icodec_drv; + +struct regulator *vreg_init(void) +{ + int rc; + struct regulator *vreg_ptr; + + vreg_ptr = regulator_get(NULL, "8058_s4"); + if (IS_ERR(vreg_ptr)) { + pr_aud_err("%s: regulator_get 8058_s4 failed\n", __func__); + return NULL; + } + + rc = regulator_set_voltage(vreg_ptr, SNDDEV_VREG_8058_S4_VOLTAGE, + SNDDEV_VREG_8058_S4_VOLTAGE); + if (rc == 0) + return vreg_ptr; + else + return NULL; +} + +static void vreg_deinit(struct regulator *vreg) +{ + regulator_put(vreg); +} + +static void vreg_mode_vote(struct regulator *vreg, int enable, int mode) +{ + int rc; + if (enable) { + rc = regulator_enable(vreg); + if (rc != 0) + pr_aud_err("%s:Enabling regulator failed\n", __func__); + else { + if (mode) + regulator_set_optimum_mode(vreg, + SNDDEV_VREG_HIGH_POWER_LOAD); + else + regulator_set_optimum_mode(vreg, + SNDDEV_VREG_LOW_POWER_LOAD); + } + } else { + rc = regulator_disable(vreg); + if (rc != 0) + pr_aud_err("%s:Disabling regulator failed\n", __func__); + } +} + +struct msm_cdcclk_ctl_state { + unsigned int rx_mclk; + unsigned int rx_mclk_requested; + unsigned int tx_mclk; + unsigned int tx_mclk_requested; +}; + +static struct msm_cdcclk_ctl_state the_msm_cdcclk_ctl_state; + +static int msm_snddev_rx_mclk_request(void) +{ + int rc = 0; +/* + rc = gpio_request(the_msm_cdcclk_ctl_state.rx_mclk, + "MSM_SNDDEV_RX_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MSM SNDDEV RX failed\n", __func__); + return rc; + } + the_msm_cdcclk_ctl_state.rx_mclk_requested = 1; +*/ + return rc; +} +static int msm_snddev_tx_mclk_request(void) +{ + int rc = 0; +/* + rc = gpio_request(the_msm_cdcclk_ctl_state.tx_mclk, + "MSM_SNDDEV_TX_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MSM SNDDEV TX failed\n", __func__); + return rc; + } 
+ the_msm_cdcclk_ctl_state.tx_mclk_requested = 1; +*/ + return rc; +} +static void msm_snddev_rx_mclk_free(void) +{ + if (the_msm_cdcclk_ctl_state.rx_mclk_requested) { + gpio_free(the_msm_cdcclk_ctl_state.rx_mclk); + the_msm_cdcclk_ctl_state.rx_mclk_requested = 0; + } +} +static void msm_snddev_tx_mclk_free(void) +{ + if (the_msm_cdcclk_ctl_state.tx_mclk_requested) { + gpio_free(the_msm_cdcclk_ctl_state.tx_mclk); + the_msm_cdcclk_ctl_state.tx_mclk_requested = 0; + } +} + +static int get_msm_cdcclk_ctl_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct resource *res; + + /* Claim all of the GPIOs. */ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "msm_snddev_rx_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MSM SNDDEV RX\n", __func__); + return -ENODEV; + } + the_msm_cdcclk_ctl_state.rx_mclk = res->start; + the_msm_cdcclk_ctl_state.rx_mclk_requested = 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "msm_snddev_tx_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MSM SNDDEV TX\n", __func__); + return -ENODEV; + } + the_msm_cdcclk_ctl_state.tx_mclk = res->start; + the_msm_cdcclk_ctl_state.tx_mclk_requested = 0; + + return rc; +} +static int msm_cdcclk_ctl_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = get_msm_cdcclk_ctl_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return -ENODEV; + } + return rc; +} +static struct platform_driver msm_cdcclk_ctl_driver = { + .probe = msm_cdcclk_ctl_probe, + .driver = { .name = "msm_cdcclk_ctl"} +}; + +static int snddev_icodec_rxclk_enable(struct snddev_icodec_state *icodec, + int en) +{ + int trc; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + mutex_lock(&drv->rx_mclk_lock); + if (en) { + if (aic3254_use_mclk_counter == 0) { + drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk"); + if (IS_ERR(drv->rx_osrclk)) { + pr_aud_err("%s turning on RX MCLK Error\n", \ + __func__); + goto error_invalid_osrclk; + } + + trc = clk_set_rate(drv->rx_osrclk, \ + SNDDEV_ICODEC_CLK_RATE(\ + icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting RX m clock1\n"); + goto error_invalid_freq; + } + clk_enable(drv->rx_osrclk); + } + + aic3254_use_mclk_counter++; + + } else { + if (aic3254_use_mclk_counter > 0) { + aic3254_use_mclk_counter--; + if (aic3254_use_mclk_counter == 0) + clk_disable(drv->rx_osrclk); + } else + pr_aud_info("%s: counter error!\n", __func__); + } + + mutex_unlock(&drv->rx_mclk_lock); + + pr_aud_info("%s: en: %d counter: %d\n", __func__, en, \ + aic3254_use_mclk_counter); + + return 0; + +error_invalid_osrclk: +error_invalid_freq: + pr_aud_err("%s: encounter error\n", __func__); + msm_snddev_rx_mclk_free(); + + mutex_unlock(&drv->rx_mclk_lock); + return -ENODEV; +} + +static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec) +{ + int trc; + int rc_clk; + int afe_channel_mode; + union afe_port_config afe_config; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + wake_lock(&drv->rx_idlelock); + + if (drv->snddev_vreg) { + if (!strcmp(icodec->data->name, "headset_stereo_rx")) + vreg_mode_vote(drv->snddev_vreg, 1, + SNDDEV_LOW_POWER_MODE); + else + vreg_mode_vote(drv->snddev_vreg, 1, + SNDDEV_HIGH_POWER_MODE); + } + + if (support_aic3254_use_mclk) { + rc_clk = snddev_icodec_rxclk_enable(icodec, 1); + if (IS_ERR_VALUE(rc_clk)) { + pr_aud_err("%s Enable RX master clock Error\n", \ + __func__); + goto error_invalid_freq; + } + } else { + 
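+		/*
+		 * AIC3254 MCLK refcounting is not in use here: request the RX MCLK
+		 * directly and drive the OSR clock at 256 * sample rate
+		 * (SNDDEV_ICODEC_CLK_RATE) for this path alone.
+		 */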
msm_snddev_rx_mclk_request(); + + drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk"); + if (IS_ERR(drv->rx_osrclk)) + pr_aud_err("%s master clock Error\n", __func__); + + trc = clk_set_rate(drv->rx_osrclk, + SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_invalid_freq; + } + + clk_enable(drv->rx_osrclk); + } + + drv->rx_bitclk = clk_get(0, "i2s_spkr_bit_clk"); + if (IS_ERR(drv->rx_bitclk)) + pr_aud_err("%s clock Error\n", __func__); + + /* *************************************** + * 1. CPU MASTER MODE: + * Master clock = Sample Rate * OSR rate bit clock + * OSR Rate bit clock = bit/sample * channel master + * clock / bit clock = divider value = 8 + * + * 2. CPU SLAVE MODE: + * bitclk = 0 + * *************************************** */ + + if (msm_codec_i2s_slave_mode) { + pr_debug("%s: configuring bit clock for slave mode\n", + __func__); + trc = clk_set_rate(drv->rx_bitclk, 0); + } else + trc = clk_set_rate(drv->rx_bitclk, 8); + + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_adie; + } + clk_enable(drv->rx_bitclk); + + if (icodec->data->voltage_on) + icodec->data->voltage_on(1); + + if (support_aic3254) { + if (aic3254_ops->aic3254_set_mode) { + if (msm_get_call_state() == 1) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, + icodec->data->aic3254_voc_id); + else + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, + icodec->data->aic3254_id); + } + } + if (support_adie) { + /* Configure ADIE */ + trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); + if (IS_ERR_VALUE(trc)) + pr_aud_err("%s: adie codec open failed\n", __func__); + else + adie_codec_setpath(icodec->adie_path, + icodec->sample_rate, 256); + /* OSR default to 256, can be changed for power optimization + * If OSR is to be changed, need clock API for setting the divider + */ + } + + switch (icodec->data->channel_mode) { + case 2: + afe_channel_mode = MSM_AFE_STEREO; + break; + case 1: + default: + afe_channel_mode = MSM_AFE_MONO; + break; + } + afe_config.mi2s.channel = afe_channel_mode; + afe_config.mi2s.bitwidth = 16; + afe_config.mi2s.line = 1; + if (msm_codec_i2s_slave_mode) + afe_config.mi2s.ws = 0; + else + afe_config.mi2s.ws = 1; + + trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate); + + if (support_adie) { + /* Enable ADIE */ + if (icodec->adie_path) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_READY); + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_ANALOG_READY); + } + + if (msm_codec_i2s_slave_mode) + adie_codec_set_master_mode(icodec->adie_path, 1); + else + adie_codec_set_master_mode(icodec->adie_path, 0); + } + /* Enable power amplifier */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(1); + + icodec->enabled = 1; + + wake_unlock(&drv->rx_idlelock); + return 0; + +error_adie: + clk_disable(drv->rx_bitclk); + clk_disable(drv->rx_osrclk); +error_invalid_freq: + + pr_aud_err("%s: encounter error\n", __func__); + msm_snddev_rx_mclk_free(); + + wake_unlock(&drv->rx_idlelock); + return -ENODEV; +} + +static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec) +{ + int trc; + int rc_clk; + int afe_channel_mode; + union afe_port_config afe_config; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;; + + wake_lock(&drv->tx_idlelock); + + if (drv->snddev_vreg) + vreg_mode_vote(drv->snddev_vreg, 1, SNDDEV_HIGH_POWER_MODE); + + if (support_aic3254_use_mclk) { + rc_clk = snddev_icodec_rxclk_enable(icodec, 1); + if 
(IS_ERR_VALUE(rc_clk)) { + pr_aud_err("%s Enable RX master clock Error\n", \ + __func__); + goto error_invalid_osrclk; + } + } + + msm_snddev_tx_mclk_request(); + + drv->tx_osrclk = clk_get(0, "i2s_mic_osr_clk"); + if (IS_ERR(drv->tx_osrclk)) { + pr_aud_err("%s master clock Error\n", __func__); + goto error_invalid_osrclk; + } + + trc = clk_set_rate(drv->tx_osrclk, + SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate)); + if (IS_ERR_VALUE(trc)) { + pr_aud_err("ERROR setting m clock1\n"); + goto error_invalid_freq; + } + + clk_enable(drv->tx_osrclk); + + drv->tx_bitclk = clk_get(0, "i2s_mic_bit_clk"); + if (IS_ERR(drv->tx_bitclk)) { + pr_aud_err("%s clock Error\n", __func__); + goto error_invalid_bitclk; + } + + /* *************************************** + * 1. CPU MASTER MODE: + * Master clock = Sample Rate * OSR rate bit clock + * OSR Rate bit clock = bit/sample * channel master + * clock / bit clock = divider value = 8 + * + * 2. CPU SLAVE MODE: + * bitclk = 0 + * *************************************** */ + if (msm_codec_i2s_slave_mode) { + pr_debug("%s: configuring bit clock for slave mode\n", + __func__); + trc = clk_set_rate(drv->tx_bitclk, 0); + } else + trc = clk_set_rate(drv->tx_bitclk, 8); + + clk_enable(drv->tx_bitclk); + + if (support_aic3254) { + if (aic3254_ops->aic3254_set_mode) { + if (msm_get_call_state() == 1) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, + icodec->data->aic3254_voc_id); + else + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, + icodec->data->aic3254_id); + } + } + if (support_adie) { + /* Enable ADIE */ + trc = adie_codec_open(icodec->data->profile, &icodec->adie_path); + if (IS_ERR_VALUE(trc)) + pr_aud_err("%s: adie codec open failed\n", __func__); + else + adie_codec_setpath(icodec->adie_path, + icodec->sample_rate, 256); + } + switch (icodec->data->channel_mode) { + case 2: + afe_channel_mode = MSM_AFE_STEREO; + break; + case 1: + default: + afe_channel_mode = MSM_AFE_MONO; + break; + } + afe_config.mi2s.channel = afe_channel_mode; + afe_config.mi2s.bitwidth = 16; + afe_config.mi2s.line = 1; + if (msm_codec_i2s_slave_mode) + afe_config.mi2s.ws = 0; + else + afe_config.mi2s.ws = 1; + + trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate); + + if (icodec->adie_path && support_adie) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_READY); + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_ANALOG_READY); + + if (msm_codec_i2s_slave_mode) + adie_codec_set_master_mode(icodec->adie_path, 1); + else + adie_codec_set_master_mode(icodec->adie_path, 0); + } + + /* Reuse pamp_on for TX platform-specific setup */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(1); + + icodec->enabled = 1; + + wake_unlock(&drv->tx_idlelock); + return 0; + +error_invalid_bitclk: + clk_disable(drv->tx_osrclk); +error_invalid_freq: +error_invalid_osrclk: + if (icodec->data->pamp_on) + icodec->data->pamp_on(0); + msm_snddev_tx_mclk_free(); + + pr_aud_err("%s: encounter error\n", __func__); + + wake_unlock(&drv->tx_idlelock); + return -ENODEV; +} + +static int snddev_icodec_close_rx(struct snddev_icodec_state *icodec) +{ + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + struct snddev_icodec_data *data = icodec->data; + + wake_lock(&drv->rx_idlelock); + + if (drv->snddev_vreg) + vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE); + + /* Disable power amplifier */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(0); + + if (support_aic3254) { + /* Restore default id for A3254 */ + if (data->aic3254_id != 
data->default_aic3254_id) + data->aic3254_id = data->default_aic3254_id; + /* Disable External Codec A3254 */ + if (aic3254_ops->aic3254_set_mode) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_RX, DOWNLINK_OFF); + } + if (support_adie) { + /* Disable ADIE */ + if (icodec->adie_path) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_OFF); + adie_codec_close(icodec->adie_path); + icodec->adie_path = NULL; + } + } + + afe_close(icodec->data->copp_id); + + if (icodec->data->voltage_on) + icodec->data->voltage_on(0); + + clk_disable(drv->rx_bitclk); + + if (support_aic3254_use_mclk) + snddev_icodec_rxclk_enable(icodec, 0); + else + clk_disable(drv->rx_osrclk); + + msm_snddev_rx_mclk_free(); + + icodec->enabled = 0; + + wake_unlock(&drv->rx_idlelock); + return 0; +} + +static int snddev_icodec_close_tx(struct snddev_icodec_state *icodec) +{ + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + struct snddev_icodec_data *data = icodec->data; + + wake_lock(&drv->tx_idlelock); + + if (drv->snddev_vreg) + vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE); + + /* Reuse pamp_off for TX platform-specific setup */ + if (icodec->data->pamp_on) + icodec->data->pamp_on(0); + + if (support_aic3254) { + /* Restore default id for A3254 */ + if (data->aic3254_id != data->default_aic3254_id) + data->aic3254_id = data->default_aic3254_id; + /* Disable External Codec A3254 */ + if (aic3254_ops->aic3254_set_mode) + aic3254_ops->aic3254_set_mode(AIC3254_CONFIG_TX, UPLINK_OFF); + } + if (support_adie) { + /* Disable ADIE */ + if (icodec->adie_path) { + adie_codec_proceed_stage(icodec->adie_path, + ADIE_CODEC_DIGITAL_OFF); + adie_codec_close(icodec->adie_path); + icodec->adie_path = NULL; + } + } + afe_close(icodec->data->copp_id); + + clk_disable(drv->tx_bitclk); + clk_disable(drv->tx_osrclk); + + if (support_aic3254_use_mclk) + snddev_icodec_rxclk_enable(icodec, 0); + + msm_snddev_tx_mclk_free(); + + + icodec->enabled = 0; + + wake_unlock(&drv->tx_idlelock); + return 0; +} + +static int snddev_icodec_set_device_volume_impl( + struct msm_snddev_info *dev_info, u32 volume) +{ + struct snddev_icodec_state *icodec; + + int rc = 0; + + icodec = dev_info->private_data; + + if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_DIGITAL) { + + rc = adie_codec_set_device_digital_volume(icodec->adie_path, + icodec->data->channel_mode, volume); + if (rc < 0) { + pr_aud_err("%s: unable to set_device_digital_volume for" + "%s volume in percentage = %u\n", + __func__, dev_info->name, volume); + return rc; + } + + } else if (icodec->data->dev_vol_type & SNDDEV_DEV_VOL_ANALOG) + rc = adie_codec_set_device_analog_volume(icodec->adie_path, + icodec->data->channel_mode, volume); + if (rc < 0) { + pr_aud_err("%s: unable to set_device_analog_volume for" + "%s volume in percentage = %u\n", + __func__, dev_info->name, volume); + return rc; + } + else { + pr_aud_err("%s: Invalid device volume control\n", __func__); + return -EPERM; + } + return rc; +} + +static int snddev_icodec_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + if (!dev_info) { + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + + if (icodec->data->capability & SNDDEV_CAP_RX) { + mutex_lock(&drv->rx_lock); + if (drv->rx_active) { + mutex_unlock(&drv->rx_lock); + rc = -EBUSY; + goto error; + } + rc = snddev_icodec_open_rx(icodec); + + if (!IS_ERR_VALUE(rc)) { + drv->rx_active = 1; + if (support_adie && 
(icodec->data->dev_vol_type & ( + SNDDEV_DEV_VOL_DIGITAL | + SNDDEV_DEV_VOL_ANALOG))) + rc = snddev_icodec_set_device_volume_impl( + dev_info, dev_info->dev_volume); + } + mutex_unlock(&drv->rx_lock); + } else { + mutex_lock(&drv->tx_lock); + if (drv->tx_active) { + mutex_unlock(&drv->tx_lock); + rc = -EBUSY; + goto error; + } + rc = snddev_icodec_open_tx(icodec); + + if (!IS_ERR_VALUE(rc)) { + drv->tx_active = 1; + if (support_adie && (icodec->data->dev_vol_type & ( + SNDDEV_DEV_VOL_DIGITAL | + SNDDEV_DEV_VOL_ANALOG))) + rc = snddev_icodec_set_device_volume_impl( + dev_info, dev_info->dev_volume); + } + mutex_unlock(&drv->tx_lock); + } +error: + return rc; +} + +static int snddev_icodec_close(struct msm_snddev_info *dev_info) +{ + int rc = 0; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + if (!dev_info) { + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + + if (icodec->data->capability & SNDDEV_CAP_RX) { + mutex_lock(&drv->rx_lock); + if (!drv->rx_active) { + mutex_unlock(&drv->rx_lock); + rc = -EPERM; + goto error; + } + rc = snddev_icodec_close_rx(icodec); + if (!IS_ERR_VALUE(rc)) + drv->rx_active = 0; + mutex_unlock(&drv->rx_lock); + } else { + mutex_lock(&drv->tx_lock); + if (!drv->tx_active) { + mutex_unlock(&drv->tx_lock); + rc = -EPERM; + goto error; + } + rc = snddev_icodec_close_tx(icodec); + if (!IS_ERR_VALUE(rc)) + drv->tx_active = 0; + mutex_unlock(&drv->tx_lock); + } + +error: + return rc; +} + +static int snddev_icodec_check_freq(u32 req_freq) +{ + int rc = -EINVAL; + + if ((req_freq != 0) && (req_freq >= 8000) && (req_freq <= 48000)) { + if ((req_freq == 8000) || (req_freq == 11025) || + (req_freq == 12000) || (req_freq == 16000) || + (req_freq == 22050) || (req_freq == 24000) || + (req_freq == 32000) || (req_freq == 44100) || + (req_freq == 48000)) { + rc = 0; + } else + pr_aud_info("%s: Unsupported Frequency:%d\n", __func__, + req_freq); + } + return rc; +} + +static int snddev_icodec_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc; + struct snddev_icodec_state *icodec; + + if (!dev_info) { + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + if (support_adie && + adie_codec_freq_supported(icodec->data->profile, rate) != 0) { + rc = -EINVAL; + goto error; + } else { + if (snddev_icodec_check_freq(rate) != 0) { + rc = -EINVAL; + goto error; + } else + icodec->sample_rate = rate; + } + + if (icodec->enabled) { + snddev_icodec_close(dev_info); + snddev_icodec_open(dev_info); + } + + return icodec->sample_rate; + +error: + return rc; +} + +static int snddev_icodec_enable_sidetone(struct msm_snddev_info *dev_info, + u32 enable, uint16_t gain) +{ + int rc = 0; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + /*3254 sidetone will be binded with dsp image.*/ + if (support_aic3254 || !support_adie) + goto error; + + if (!dev_info) { + pr_aud_err("invalid dev_info\n"); + rc = -EINVAL; + goto error; + } + + icodec = dev_info->private_data; + + if (icodec->data->capability & SNDDEV_CAP_RX) { + mutex_lock(&drv->rx_lock); + if (!drv->rx_active || !dev_info->opened) { + pr_aud_err("dev not active\n"); + rc = -EPERM; + mutex_unlock(&drv->rx_lock); + goto error; + } + rc = afe_sidetone(PRIMARY_I2S_TX, PRIMARY_I2S_RX, enable, gain); + if (rc < 0) + pr_aud_err("%s: AFE command sidetone failed\n", __func__); + mutex_unlock(&drv->rx_lock); + } else { + rc = -EINVAL; + pr_aud_err("rx device only\n"); + } + +error: + return rc; + +} 
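+/*
+ * ANC is only supported on ADIE RX paths flagged with SNDDEV_CAP_ANC; the
+ * register write sequence is pulled from ACDB calibration data (get_anc_cal)
+ * at enable time rather than being hard-coded in the driver.
+ */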
+static int snddev_icodec_enable_anc(struct msm_snddev_info *dev_info, + u32 enable) +{ + int rc = 0; + struct adie_codec_anc_data *reg_writes; + struct acdb_cal_block cal_block; + struct snddev_icodec_state *icodec; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + + if (support_aic3254 || !support_adie) + goto error; + + pr_aud_info("%s: enable=%d\n", __func__, enable); + + if (!dev_info) { + pr_aud_err("invalid dev_info\n"); + rc = -EINVAL; + goto error; + } + icodec = dev_info->private_data; + + if ((icodec->data->capability & SNDDEV_CAP_RX) && + (icodec->data->capability & SNDDEV_CAP_ANC)) { + mutex_lock(&drv->rx_lock); + + if (!drv->rx_active || !dev_info->opened) { + pr_aud_err("dev not active\n"); + rc = -EPERM; + mutex_unlock(&drv->rx_lock); + goto error; + } + if (enable) { + get_anc_cal(&cal_block); + reg_writes = (struct adie_codec_anc_data *) + cal_block.cal_kvaddr; + + if (reg_writes == NULL) { + pr_aud_err("error, no calibration data\n"); + rc = -1; + mutex_unlock(&drv->rx_lock); + goto error; + } + + rc = adie_codec_enable_anc(icodec->adie_path, + 1, reg_writes); + } else { + rc = adie_codec_enable_anc(icodec->adie_path, + 0, NULL); + } + mutex_unlock(&drv->rx_lock); + } else { + rc = -EINVAL; + pr_aud_err("rx and ANC device only\n"); + } + +error: + return rc; + +} + +int snddev_icodec_set_device_volume(struct msm_snddev_info *dev_info, + u32 volume) +{ + struct snddev_icodec_state *icodec; + struct mutex *lock; + struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; + int rc = -EPERM; + + if (!dev_info) { + pr_aud_info("%s : device not intilized.\n", __func__); + return -EINVAL; + } + + icodec = dev_info->private_data; + + if (!(icodec->data->dev_vol_type & (SNDDEV_DEV_VOL_DIGITAL + | SNDDEV_DEV_VOL_ANALOG))) { + + pr_aud_info("%s : device %s does not support device volume " + "control.", __func__, dev_info->name); + return -EPERM; + } + dev_info->dev_volume = volume; + + if (icodec->data->capability & SNDDEV_CAP_RX) + lock = &drv->rx_lock; + else + lock = &drv->tx_lock; + + mutex_lock(lock); + + rc = snddev_icodec_set_device_volume_impl(dev_info, + dev_info->dev_volume); + mutex_unlock(lock); + return rc; +} + +void htc_8x60_register_icodec_ops(struct q6v2audio_icodec_ops *ops) +{ + audio_ops = ops; +} + +static int snddev_icodec_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_icodec_data *pdata; + struct msm_snddev_info *dev_info; + struct snddev_icodec_state *icodec; + static int first_time = 1; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + rc = -1; + goto error; + } + pdata = pdev->dev.platform_data; + if ((pdata->capability & SNDDEV_CAP_RX) && + (pdata->capability & SNDDEV_CAP_TX)) { + pr_aud_err("%s: invalid device data either RX or TX\n", __func__); + goto error; + } + icodec = kzalloc(sizeof(struct snddev_icodec_state), GFP_KERNEL); + if (!icodec) { + rc = -ENOMEM; + goto error; + } + dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + kfree(icodec); + rc = -ENOMEM; + goto error; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *) icodec; + dev_info->dev_ops.open = snddev_icodec_open; + dev_info->dev_ops.close = snddev_icodec_close; + dev_info->dev_ops.set_freq = snddev_icodec_set_freq; + dev_info->dev_ops.set_device_volume = snddev_icodec_set_device_volume; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + msm_snddev_register(dev_info); + icodec->data = pdata; + 
icodec->sample_rate = pdata->default_sample_rate; + dev_info->sample_rate = pdata->default_sample_rate; + dev_info->channel_mode = pdata->channel_mode; + if (pdata->capability & SNDDEV_CAP_RX) + dev_info->dev_ops.enable_sidetone = + snddev_icodec_enable_sidetone; + else + dev_info->dev_ops.enable_sidetone = NULL; + + if (pdata->capability & SNDDEV_CAP_ANC) { + dev_info->dev_ops.enable_anc = + snddev_icodec_enable_anc; + } else { + dev_info->dev_ops.enable_anc = NULL; + } + if (first_time) { + if (audio_ops->support_aic3254) { + support_aic3254 = audio_ops->support_aic3254(); + } else { + support_aic3254 = 0; + } + pr_aud_info("%s: support_aic3254 = %d\n", + __func__, support_aic3254); + + if (audio_ops->support_adie) { + support_adie = audio_ops->support_adie(); + } else { + support_adie = 0; + } + pr_aud_info("%s: support_adie = %d\n", + __func__, support_adie); + + if (audio_ops->is_msm_i2s_slave) { + msm_codec_i2s_slave_mode = audio_ops->is_msm_i2s_slave(); + } else { + msm_codec_i2s_slave_mode = 0; + } + pr_aud_info("%s: msm_codec_i2s_slave_mode = %d\n", + __func__, msm_codec_i2s_slave_mode); + + if (audio_ops->support_aic3254_use_mclk) + support_aic3254_use_mclk = \ + audio_ops->support_aic3254_use_mclk(); + else + support_aic3254_use_mclk = 0; + pr_aud_info("%s: support_aic3254_use_mclk = %d\n", + __func__, support_aic3254_use_mclk); + + first_time = 0; + } + +error: + return rc; +} + +static int snddev_icodec_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver snddev_icodec_driver = { + .probe = snddev_icodec_probe, + .remove = snddev_icodec_remove, + .driver = { .name = "snddev_icodec" } +}; + + +void htc_8x60_register_aic3254_ops(struct q6v2audio_aic3254_ops *ops) +{ + aic3254_ops = ops; +} + +int update_aic3254_info(struct aic3254_info *info) +{ + struct msm_snddev_info *dev_info; + int rc = 0; + + dev_info = audio_dev_ctrl_find_dev(info->dev_id); + if (IS_ERR(dev_info)) + rc = -ENODEV; + else { + if ((dev_info->copp_id == PRIMARY_I2S_RX) || + (dev_info->copp_id == PRIMARY_I2S_TX)) { + struct snddev_icodec_state *icodec; + icodec = dev_info->private_data; + icodec->data->aic3254_id = info->path_id; + pr_aud_info("%s: update aic3254 id of device %s as %d\n", + __func__, dev_info->name, icodec->data->aic3254_id); + } + } + + return rc; +} + +module_param(msm_codec_i2s_slave_mode, bool, 0); +MODULE_PARM_DESC(msm_codec_i2s_slave_mode, "Set MSM to I2S slave clock mode"); + +static int __init snddev_icodec_init(void) +{ + s32 rc; + struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; + + rc = platform_driver_register(&snddev_icodec_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for snddev icodec failed\n", + __func__); + goto error_snddev_icodec_driver; + } + + rc = platform_driver_register(&msm_cdcclk_ctl_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for msm snddev failed\n", + __func__); + goto error_msm_cdcclk_ctl_driver; + } + + mutex_init(&icodec_drv->rx_lock); + mutex_init(&icodec_drv->tx_lock); + + mutex_init(&icodec_drv->rx_mclk_lock); + + icodec_drv->rx_active = 0; + icodec_drv->tx_active = 0; + icodec_drv->snddev_vreg = vreg_init(); + + wake_lock_init(&icodec_drv->tx_idlelock, WAKE_LOCK_IDLE, + "snddev_tx_idle"); + wake_lock_init(&icodec_drv->rx_idlelock, WAKE_LOCK_IDLE, + "snddev_rx_idle"); + return 0; + +error_msm_cdcclk_ctl_driver: + platform_driver_unregister(&snddev_icodec_driver); +error_snddev_icodec_driver: + return -ENODEV; +} + +static void __exit 
snddev_icodec_exit(void) +{ + struct snddev_icodec_drv_state *icodec_drv = &snddev_icodec_drv; + + platform_driver_unregister(&snddev_icodec_driver); + platform_driver_unregister(&msm_cdcclk_ctl_driver); + + clk_put(icodec_drv->rx_osrclk); + clk_put(icodec_drv->tx_osrclk); + if (icodec_drv->snddev_vreg) { + vreg_deinit(icodec_drv->snddev_vreg); + icodec_drv->snddev_vreg = NULL; + } + return; +} + +module_init(snddev_icodec_init); +module_exit(snddev_icodec_exit); + +MODULE_DESCRIPTION("ICodec Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c new file mode 100644 index 00000000000..ddeb8d2a894 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.c @@ -0,0 +1,472 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "snddev_mi2s.h" + +#define SNDDEV_MI2S_PCM_SZ 32 /* 16 bit / sample stereo mode */ +#define SNDDEV_MI2S_MUL_FACTOR 3 /* Multi by 8 Shift by 3 */ +#define SNDDEV_MI2S_CLK_RATE(freq) \ + (((freq) * (SNDDEV_MI2S_PCM_SZ)) << (SNDDEV_MI2S_MUL_FACTOR)) + + +/* Global state for the driver */ +struct snddev_mi2s_drv_state { + + struct clk *tx_osrclk; + struct clk *tx_bitclk; + int mi2s_ws; + int mi2s_mclk; + int mi2s_sclk; + int fm_mi2s_sd; +}; + +static struct snddev_mi2s_drv_state snddev_mi2s_drv; + +static struct msm_mi2s_gpio_data *mi2s_gpio; + +static int mi2s_gpios_request(void) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + rc = gpio_request(snddev_mi2s_drv.mi2s_ws, "MI2S_WS"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_WS failed\n", __func__); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.mi2s_sclk, "MI2S_SCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_SCLK failed\n", __func__); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.mi2s_mclk, "MI2S_MCLK"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for MI2S_MCLK failed\n", + __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + return rc; + } + + rc = gpio_request(snddev_mi2s_drv.fm_mi2s_sd, "FM_MI2S_SD"); + if (rc < 0) { + pr_aud_err("%s: GPIO request for FM_MI2S_SD failed\n", + __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + gpio_free(snddev_mi2s_drv.mi2s_mclk); + return rc; + } + + return rc; +} + +static void mi2s_gpios_free(void) +{ + pr_debug("%s\n", __func__); + gpio_free(snddev_mi2s_drv.mi2s_ws); + gpio_free(snddev_mi2s_drv.mi2s_sclk); + gpio_free(snddev_mi2s_drv.mi2s_mclk); + gpio_free(snddev_mi2s_drv.fm_mi2s_sd); +} + +static int mi2s_get_gpios(struct platform_device *pdev) +{ + int rc = 0; + struct 
resource *res; + + /* Claim all of the GPIOs. */ + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "mi2s_ws"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_WS\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_ws = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, "mi2s_sclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_SCLK\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_sclk = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "mi2s_mclk"); + if (!res) { + pr_aud_err("%s: failed to get gpio MI2S_MCLK\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.mi2s_mclk = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_IO, + "fm_mi2s_sd"); + if (!res) { + pr_aud_err("%s: failed to get gpio FM_MI2S_SD\n", __func__); + return -ENODEV; + } + + snddev_mi2s_drv.fm_mi2s_sd = res->start; + + return rc; +} + +static int mi2s_fm_probe(struct platform_device *pdev) +{ + int rc = 0; + + pr_aud_info("%s:\n", __func__); + + rc = mi2s_get_gpios(pdev); + if (rc < 0) { + pr_aud_err("%s: GPIO configuration failed\n", __func__); + return rc; + } + + mi2s_gpio = (struct msm_mi2s_gpio_data *)(pdev->dev.platform_data); + return rc; +} + +static struct platform_driver mi2s_fm_driver = { + .probe = mi2s_fm_probe, + .driver = { .name = "msm_mi2s"} +}; + +static u8 num_of_bits_set(u8 sd_line_mask) +{ + u8 num_bits_set = 0; + + while (sd_line_mask) { + + if (sd_line_mask & 1) + num_bits_set++; + sd_line_mask = sd_line_mask >> 1; + } + return num_bits_set; +} + +static int snddev_mi2s_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + union afe_port_config afe_config; + u8 channels; + u8 num_of_sd_lines = 0; + struct snddev_mi2s_drv_state *drv = &snddev_mi2s_drv; + struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; + + if (!dev_info) { + pr_aud_err("%s: msm_snddev_info is null\n", __func__); + return -EINVAL; + } + + /* set up osr clk */ + drv->tx_osrclk = clk_get(0, "mi2s_osr_clk"); + if (IS_ERR(drv->tx_osrclk)) + pr_aud_err("%s master clock Error\n", __func__); + + rc = clk_set_rate(drv->tx_osrclk, + SNDDEV_MI2S_CLK_RATE(dev_info->sample_rate)); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("ERROR setting osr clock\n"); + return -ENODEV; + } + clk_enable(drv->tx_osrclk); + + /* set up bit clk */ + drv->tx_bitclk = clk_get(0, "mi2s_bit_clk"); + if (IS_ERR(drv->tx_bitclk)) + pr_aud_err("%s clock Error\n", __func__); + + rc = clk_set_rate(drv->tx_bitclk, 8); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("ERROR setting bit clock\n"); + clk_disable(drv->tx_osrclk); + return -ENODEV; + } + clk_enable(drv->tx_bitclk); + + afe_config.mi2s.bitwidth = 16; + + if (snddev_mi2s_data->channel_mode == 1) + channels = AFE_MI2S_MONO; + else if (snddev_mi2s_data->channel_mode == 2) + channels = AFE_MI2S_STEREO; + else if (snddev_mi2s_data->channel_mode == 4) + channels = AFE_MI2S_4CHANNELS; + else if (snddev_mi2s_data->channel_mode == 6) + channels = AFE_MI2S_6CHANNELS; + else if (snddev_mi2s_data->channel_mode == 8) + channels = AFE_MI2S_8CHANNELS; + else { + pr_aud_err("ERROR: Invalid MI2S channel mode\n"); + goto error_invalid_data; + } + + num_of_sd_lines = num_of_bits_set(snddev_mi2s_data->sd_lines); + + switch (num_of_sd_lines) { + case 1: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0: + afe_config.mi2s.line = AFE_I2S_SD0; + break; + case MI2S_SD1: + afe_config.mi2s.line = AFE_I2S_SD1; + break; + case MI2S_SD2: + afe_config.mi2s.line = AFE_I2S_SD2; + break; + case MI2S_SD3: + 
afe_config.mi2s.line = AFE_I2S_SD3; + break; + default: + pr_aud_err("%s: invalid SD line\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_STEREO && + channels != AFE_MI2S_MONO) { + pr_aud_err("%s: for one SD line, channel " + "must be 1 or 2\n", __func__); + goto error_invalid_data; + } + afe_config.mi2s.channel = channels; + break; + case 2: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1: + afe_config.mi2s.line = AFE_I2S_QUAD01; + break; + case MI2S_SD2 | MI2S_SD3: + afe_config.mi2s.line = AFE_I2S_QUAD23; + break; + default: + pr_aud_err("%s: invalid SD line\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_4CHANNELS) { + pr_aud_err("%s: for two SD lines, channel " + "must be 1 and 2 or 3 and 4\n", __func__); + goto error_invalid_data; + } + break; + case 3: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1 | MI2S_SD2: + afe_config.mi2s.line = AFE_I2S_6CHS; + break; + default: + pr_aud_err("%s: invalid SD lines\n", + __func__); + goto error_invalid_data; + } + if (channels != AFE_MI2S_6CHANNELS) { + pr_aud_err("%s: for three SD lines, lines " + "must be 1, 2, and 3\n", __func__); + goto error_invalid_data; + } + break; + case 4: + switch (snddev_mi2s_data->sd_lines) { + case MI2S_SD0 | MI2S_SD1 | MI2S_SD2 | MI2S_SD3: + afe_config.mi2s.line = AFE_I2S_8CHS; + break; + default: + pr_aud_err("%s: invalid SD lines\n", + __func__); + goto error_invalid_data; + } + + if (channels != AFE_MI2S_8CHANNELS) { + pr_aud_err("%s: for four SD lines, lines " + "must be 1, 2, 3, and 4\n", __func__); + goto error_invalid_data; + } + break; + default: + pr_aud_err("%s: invalid SD lines\n", __func__); + goto error_invalid_data; + } + afe_config.mi2s.ws = 1; + rc = afe_open(snddev_mi2s_data->copp_id, &afe_config, + dev_info->sample_rate); + + if (rc < 0) { + pr_aud_err("%s: afe_open failed\n", __func__); + goto error_invalid_data; + } + + /*enable fm gpio here*/ + rc = mi2s_gpios_request(); + if (rc < 0) { + pr_aud_err("%s: GPIO request failed\n", __func__); + return rc; + } + + pr_aud_info("%s: afe_open done\n", __func__); + + return rc; + +error_invalid_data: + + clk_disable(drv->tx_bitclk); + clk_disable(drv->tx_osrclk); + return -EINVAL; +} + +static int snddev_mi2s_close(struct msm_snddev_info *dev_info) +{ + + struct snddev_mi2s_drv_state *mi2s_drv = &snddev_mi2s_drv; + struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; + + if (!dev_info) { + pr_aud_err("%s: msm_snddev_info is null\n", __func__); + return -EINVAL; + } + + if (!dev_info->opened) { + pr_aud_err(" %s: calling close device with out opening the" + " device\n", __func__); + return -EIO; + } + afe_close(snddev_mi2s_data->copp_id); + clk_disable(mi2s_drv->tx_bitclk); + clk_disable(mi2s_drv->tx_osrclk); + + mi2s_gpios_free(); + + pr_aud_info("%s:\n", __func__); + + return 0; +} + +static int snddev_mi2s_set_freq(struct msm_snddev_info *dev_info, u32 req_freq) +{ + if (req_freq != 48000) { + pr_aud_info("%s: Unsupported Frequency:%d\n", __func__, req_freq); + return -EINVAL; + } + return 48000; +} + + +static int snddev_mi2s_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_mi2s_data *pdata; + struct msm_snddev_info *dev_info; + + if (!pdev || !pdev->dev.platform_data) { + printk(KERN_ALERT "Invalid caller\n"); + return -ENODEV; + } + + pdata = pdev->dev.platform_data; + + dev_info = kzalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("%s: uneable to allocate memeory for msm_snddev_info\n", + 
__func__); + + return -ENOMEM; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->dev_ops.open = snddev_mi2s_open; + dev_info->dev_ops.close = snddev_mi2s_close; + dev_info->private_data = (void *)pdata; + dev_info->dev_ops.set_freq = snddev_mi2s_set_freq; + dev_info->capability = pdata->capability; + dev_info->opened = 0; + dev_info->sample_rate = pdata->sample_rate; + msm_snddev_register(dev_info); + + pr_aud_info("%s: probe done for %s\n", __func__, pdata->name); + return rc; +} + +static struct platform_driver snddev_mi2s_driver = { + .probe = snddev_mi2s_probe, + .driver = {.name = "snddev_mi2s"} +}; + +static int __init snddev_mi2s_init(void) +{ + s32 rc = 0; + + rc = platform_driver_register(&mi2s_fm_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: platform_driver_register for mi2s_fm_driver failed\n", + __func__); + goto error_mi2s_fm_platform_driver; + } + + rc = platform_driver_register(&snddev_mi2s_driver); + if (IS_ERR_VALUE(rc)) { + + pr_aud_err("%s: platform_driver_register failed\n", __func__); + goto error_platform_driver; + } + + pr_aud_info("snddev_mi2s_init : done\n"); + + return rc; + +error_platform_driver: + platform_driver_unregister(&mi2s_fm_driver); +error_mi2s_fm_platform_driver: + pr_aud_err("%s: encounter error\n", __func__); + return -ENODEV; +} + +static void __exit snddev_mi2s_exit(void) +{ + struct snddev_mi2s_drv_state *mi2s_drv = &snddev_mi2s_drv; + + platform_driver_unregister(&snddev_mi2s_driver); + clk_put(mi2s_drv->tx_osrclk); + clk_put(mi2s_drv->tx_bitclk); + return; +} + + +module_init(snddev_mi2s_init); +module_exit(snddev_mi2s_exit); + +MODULE_DESCRIPTION("MI2S Sound Device driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h new file mode 100644 index 00000000000..fa1c55e2bf8 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_mi2s.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __MACH_QDSP6_V2_SNDDEV_MI2S_H +#define __MACH_QDSP6_V2_SNDDEV_MI2S_H + +struct snddev_mi2s_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* audpp routing */ + u16 channel_mode; + u16 sd_lines; + u32 sample_rate; +}; + +#define MI2S_SD0 (1 << 0) +#define MI2S_SD1 (1 << 1) +#define MI2S_SD2 (1 << 2) +#define MI2S_SD3 (1 << 3) + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c new file mode 100644 index 00000000000..c0f2a9329ae --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.c @@ -0,0 +1,172 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include "snddev_virtual.h" + +static DEFINE_MUTEX(snddev_virtual_lock); + +static int snddev_virtual_open(struct msm_snddev_info *dev_info) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + mutex_lock(&snddev_virtual_lock); + + if (!dev_info) { + pr_aud_err("%s: NULL dev_info\n", __func__); + + rc = -EINVAL; + goto done; + } + + if (!dev_info->opened) { + rc = afe_start_pseudo_port(dev_info->copp_id); + } else { + pr_aud_err("%s: Pseudo port 0x%x is already open\n", + __func__, dev_info->copp_id); + + rc = -EBUSY; + } + +done: + mutex_unlock(&snddev_virtual_lock); + + return rc; +} + +static int snddev_virtual_close(struct msm_snddev_info *dev_info) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + mutex_lock(&snddev_virtual_lock); + + if (!dev_info) { + pr_aud_err("%s: NULL dev_info\n", __func__); + + rc = -EINVAL; + goto done; + } + + if (dev_info->opened) { + rc = afe_stop_pseudo_port(dev_info->copp_id); + } else { + pr_aud_err("%s: Pseudo port 0x%x is not open\n", + __func__, dev_info->copp_id); + + rc = -EPERM; + } + +done: + mutex_unlock(&snddev_virtual_lock); + + return rc; +} + +static int snddev_virtual_set_freq(struct msm_snddev_info *dev_info, u32 rate) +{ + int rc = 0; + + if (!dev_info) + rc = -EINVAL; + + return rate; +} + +static int snddev_virtual_probe(struct platform_device *pdev) +{ + int rc = 0; + struct snddev_virtual_data *pdata; + struct msm_snddev_info *dev_info; + + pr_debug("%s\n", __func__); + + if (!pdev || !pdev->dev.platform_data) { + pr_aud_err("%s: Invalid caller\n", __func__); + + rc = -EPERM; + goto done; + } + + pdata = pdev->dev.platform_data; + + dev_info = kmalloc(sizeof(struct msm_snddev_info), GFP_KERNEL); + if (!dev_info) { + pr_aud_err("%s: Out of memory\n", __func__); + + rc = -ENOMEM; + goto done; + } + + dev_info->name = pdata->name; + dev_info->copp_id = pdata->copp_id; + dev_info->private_data = (void *) NULL; + dev_info->dev_ops.open = snddev_virtual_open; + dev_info->dev_ops.close = snddev_virtual_close; + dev_info->dev_ops.set_freq = snddev_virtual_set_freq; + dev_info->capability = pdata->capability; + dev_info->sample_rate = 48000; + dev_info->opened = 0; + dev_info->sessions = 0; + + msm_snddev_register(dev_info); + +done: + return rc; +} + +static int snddev_virtual_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct 
platform_driver snddev_virtual_driver = { + .probe = snddev_virtual_probe, + .remove = snddev_virtual_remove, + .driver = { .name = "snddev_virtual" } +}; + +static int __init snddev_virtual_init(void) +{ + int rc = 0; + + pr_debug("%s\n", __func__); + + rc = platform_driver_register(&snddev_virtual_driver); + if (IS_ERR_VALUE(rc)) { + pr_aud_err("%s: Platform driver register failure\n", __func__); + + return -ENODEV; + } + + return 0; +} + +static void __exit snddev_virtual_exit(void) +{ + platform_driver_unregister(&snddev_virtual_driver); + + return; +} + +module_init(snddev_virtual_init); +module_exit(snddev_virtual_exit); + +MODULE_DESCRIPTION("Virtual Sound Device driver"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h new file mode 100644 index 00000000000..dec4d0739de --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/snddev_virtual.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __MACH_QDSP6V2_SNDDEV_VIRTUAL_H +#define __MACH_QDSP6V2_SNDDEV_VIRTUAL_H + +struct snddev_virtual_data { + u32 capability; /* RX or TX */ + const char *name; + u32 copp_id; /* Audpp routing */ +}; +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h new file mode 100644 index 00000000000..599919988ea --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60.h @@ -0,0 +1,2269 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Code Aurora Forum, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ +#ifndef __MACH_QDSP6V2_TIMPANI_PROFILE_H +#define __MACH_QDSP6V2_TIMPANI_PROFILE_H + +/* + * TX Device Profiles + */ + +/* Analog MIC */ +/* AMIC Primary mono */ +#define AMIC_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + + +/* AMIC Secondary mono */ +#define AMIC_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98 },\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* AMIC dual */ +#define AMIC_DUAL_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xB0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAC, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* + * Digital MIC + */ +/* DMIC1 Primary (DMIC 1 - TX1) */ +#define DMIC1_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x21)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* DMIC1 Secondary - (DMIC 2 - TX1) */ +#define DMIC1_SEC_MONO_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x12)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* DMIC Dual Primary (DMIC 1/2 - TX1) */ +#define DMIC1_PRI_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x19)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)} } + +/* DMIC2 Dual Primary (DMIC 3/4 - TX2 - 
Left/Right) */ +#define DMIC2_SEC_DUAL_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x22)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HS_DMIC2_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0x1F, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0x3F, 0x19)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0x3F, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x39, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA8, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x92, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * LINE IN + */ +#define LINEIN_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_PRI_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x2E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_STEREO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x2E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEIN_SEC_STEREO_8000_OSR_64 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 
0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x22)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x96, 0xFF, 0x18)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0xF0, 0xE0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xA2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA6, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA7, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0xC0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * AUX IN + */ +#define AUXIN_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA1)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* Headset MIC */ +#define HEADSET_AMIC2_TX_MONO_PRI_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX Device Profiles + */ + +/* RX EAR */ +#define EAR_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define EAR_SEC_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xCA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX HPH PRIMARY + */ + +/* RX HPH CLASS AB CAPLESS */ + +#define HEADSET_AB_CPLS_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_CPLS_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS AB LEGACY */ + +#define HPH_PRI_AB_LEG_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + 
+#define HP_PRI_AB_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_AB_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x09)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x186A0}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS D LEGACY */ + +#define HPH_PRI_D_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_PRI_D_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x60)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xE1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2D, 0xFF, 0x6F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2E, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3F, 0xFF, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x40, 0xFF, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x41, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x42, 0xFF, 0xBB)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x43, 0xFF, 0xF2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x44, 0xF7, 0x37)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x45, 0xFF, 0xFF)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x46, 0xFF, 0x77)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x47, 0xFF, 0xF2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x48, 0xF7, 0x37)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x49, 0xFF, 0xFF)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4A, 0xFF, 0x77)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0xFF, 0x8C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 
0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* + * RX HPH SECONDARY + */ + +/* RX HPH CLASS AB CAPLESS */ +#define HPH_SEC_AB_CPLS_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define HPH_SEC_AB_CPLS_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_AB_CPLS_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS AB LEGACY */ +#define HPH_SEC_AB_LEG_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_AB_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define HPH_SEC_AB_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFD, 0xF9)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX HPH CLASS D LEGACY */ + +#define HPH_SEC_D_LEG_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x50, 0x50)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000},\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define HPH_SEC_D_LEG_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x50, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0A, 0x0A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX LINE OUT PRIMARY */ +#define LINEOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 
0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0c)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX LINE OUT SECONDARY */ +#define LINEOUT_SEC_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_SEC_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LINEOUT_SEC_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x29, 0xFF, 0xC2)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x48)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA5, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x48, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX AUX */ +#define AUXOUT_PRI_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0xFF, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x20, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define AUXOUT_SEC_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0F)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA1, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x98, 0xFF, 0x02)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x28, 0xFF, 0xCA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x40, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x30, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA4, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x0E)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAA, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x40, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* + * LB Device Profiles + */ + +/* EAR */ +#define LB_EAR_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS AB CAPLESS */ +#define LB_HPH_AB_CPLS_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_CPLS_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + 
{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define SPEAKER_HPH_AB_CPL_PRI_STEREO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_CPLS_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x55)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS AB LEGACY */ +#define LB_HPH_AB_LEG_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_PHP_AB_LEG_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x08, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_AB_LEG_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x59)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 
300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0xFC)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFC, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* HPH CLASS D LEGACY */ +#define LB_HPH_D_LEG_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3A, 0x2A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x2F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_HPH_D_LEG_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0xA6)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3A, 0x3A)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 300000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x3F)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x3F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3E, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* LINE OUT */ +#define LB_LINEOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_LINEOUT_PRI_DIFF \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x10, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x80, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define LB_LINEOUT_PRI_STEREO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x58, 0x58)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 100000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFF, 0xA4)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* AUX OUT */ +#define LB_AUXOUT_PRI_MONO \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0xE0, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xFF, 0xA0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x03)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 50000}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x07)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0x07, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0x30, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x39, 0xE0, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 
0xBC)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \
+ {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} }
+
+
+#endif
diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h
new file mode 100644
index 00000000000..3f747bd99c7
--- /dev/null
+++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h
@@ -0,0 +1,699 @@
+/* arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_lead.h
+ *
+ * Copyright (C) 2010 HTC Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TIMPANI_PROFILE_LEAD_H
+#define __TIMPANI_PROFILE_LEAD_H
+
+/* RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */
+#define HEADSET_STEREO_AB_CPLS_48000_OSR_256 \
+ {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \
+ {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \
+ {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} }
+
+
+/* RX EAR MONO */
+#define EAR_PRI_MONO_48000_OSR_256 \
+ {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \
+ {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 
0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x23)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define SPEAKER_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x23)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANALOG IMIC Primary MONO */ +#define AMIC_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/*back mic tx*/ +#define AMIC_SEC_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x00)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xAB, 0x09, 0x09)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 
0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM HPH, RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_HEADSET_AB_CPLS_RX_48000 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM SPK, RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_SPEAKER_RX \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } +*/ +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x50, 0xFF, 0xEA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x50, 0xFF, 0xEA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0x29)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xBB)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, 
ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define DUAL_MIC_STEREO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#endif diff --git a/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h new file mode 100644 index 00000000000..e2bcc30e227 --- /dev/null +++ b/arch/arm/mach-msm/qdsp6v3/timpani_profile_8x60_vigor.h @@ -0,0 +1,641 @@ +/* arch/arm/mach-msm/qdsp6v2_1x/timpani_profile_8x60_lead.h + * + * Copyright (C) 2010 HTC Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __TIMPANI_PROFILE_VIGOR_H +#define __TIMPANI_PROFILE_VIGOR_H + +/* RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define HEADSET_STEREO_AB_CPLS_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xFD)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x27)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x36, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x37, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* RX EAR MONO*/ +#define EAR_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x28)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xA9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define SPEAKER_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x21, 0xFF, 0x28)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x22, 0xFF, 0xA9)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x26, 0xFF, 0xD1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANALOG IMIC Primary MONO */ +#define AMIC_PRI_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/*back mic tx*/ +#define AMIC_SEC_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xFC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xFC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x3A98 },\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, 
\ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* Headset MIC */ +#define HS_AMIC2_MONO_8000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 
0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* ANC Headset: Speakers on Primary Rx, Noise Microphones on Secondary Tx */ + +#define ANC_HEADSET_CPLS_AMIC1_AUXL_RX1_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x95, 0xFF, 0x40)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9B, 0x01, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0xD0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0xC1)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFF, 0x29)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xC0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xD0, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x18, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x19, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x09, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0A, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* TTY RX */ +#define TTY_HEADSET_MONO_RX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x97, 0xFF, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x06)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x01)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x4C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x45)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + 
{ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xC5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x20, 0x20)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* TTY TX */ +#define TTY_HEADSET_MONO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xA8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 
0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +/* FM HPH, RX HPH CLASS AB CAPLESS, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_HEADSET_AB_CPLS_RX_48000 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +/* FM SPK, RX SPK, STEREO OR MONO OR MONO-DIFF */ +#define AUXPGA_SPEAKER_RX \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, 
ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x2F, 0x88, 0x88)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x30, 0x90, 0x90)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xBB8}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x80)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x10, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0xAA)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x38, 0xEE, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x90, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x33, 0xF0, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + + +#define SPEAKER_HPH_AB_CPL_PRI_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x02, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x03)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x02, 0x02)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x84, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x85, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0x0F, 0x0C)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x81, 0xFF, 0x0E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x24, 0x6F, 0x6C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xB7, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x55)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x08)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xCA)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0xF5)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x4C, 0xFE, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x48)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0x1388}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0xF8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x10)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x24)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x04)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE0, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE1, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x1C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFE, 0x3C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE2, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xE3, 0xFC, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3C, 0x27, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3D, 0x04, 0x00)}, \ + 
{ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x32, 0xF8, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x31, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3A, 0x24, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x3B, 0x04, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#define DUAL_MIC_STEREO_TX_48000_OSR_256 \ + {{ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_FLASH_IMAGE}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x05)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x80, 0x05, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0x30)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0xAC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x82, 0xFF, 0x1E)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0xA3, 0x01, 0x01)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x93, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x94, 0xFF, 0x1B)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x99, 0x0F, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x9F, 0x03, 0x03)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0xC8)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0xBC)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x65)}, \ + {ADIE_CODEC_ACTION_DELAY_WAIT, 0xbb8 }, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x0C)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x86, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x87, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xC0)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_ANALOG_READY}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x8A, 0xF0, 0xF0)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x83, 0x0C, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_ANALOG_OFF}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x0D, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x14, 0xFF, 0x64)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x11, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_ENTRY, ADIE_CODEC_PACK_ENTRY(0x12, 0xFF, 0x00)}, \ + {ADIE_CODEC_ACTION_STAGE_REACHED, ADIE_CODEC_DIGITAL_OFF} } + +#endif diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c index 33e592f7c6a..b01a48c9bf0 100644 --- a/arch/arm/mach-msm/subsystem_map.c +++ b/arch/arm/mach-msm/subsystem_map.c @@ -358,7 +358,7 @@ struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys, temp_phys += SZ_4K, temp_va += SZ_4K) { ret = iommu_map(d, temp_va, temp_phys, - get_order(SZ_4K), 0); + get_order(SZ_4K), (IOMMU_READ | IOMMU_WRITE)); if (ret) { pr_err("%s: could not map iommu for" " domain %p, iova %lx," @@ -372,8 +372,7 @@ struct msm_mapped_buffer *msm_subsystem_map_buffer(unsigned long phys, if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X) msm_iommu_map_extra - (d, temp_va, length, SZ_4K, - (IOMMU_READ | IOMMU_WRITE)); + (d, temp_va, length, SZ_4K, (IOMMU_READ | 
IOMMU_WRITE)); } } diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c index 23d3980ef59..d90e244e05e 100644 --- a/arch/arm/mach-mv78xx0/common.c +++ b/arch/arm/mach-mv78xx0/common.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -170,7 +171,7 @@ void __init mv78xx0_map_io(void) void __init mv78xx0_ehci0_init(void) { orion_ehci_init(&mv78xx0_mbus_dram_info, - USB0_PHYS_BASE, IRQ_MV78XX0_USB_0); + USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA); } diff --git a/arch/arm/mach-mv78xx0/mpp.h b/arch/arm/mach-mv78xx0/mpp.h index b61b5092712..3752302ae2e 100644 --- a/arch/arm/mach-mv78xx0/mpp.h +++ b/arch/arm/mach-mv78xx0/mpp.h @@ -24,296 +24,296 @@ #define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1) #define MPP0_GPIO MPP(0, 0x0, 1, 1, 1) -#define MPP0_GE0_COL MPP(0, 0x1, 1, 0, 1) -#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 1, 1) +#define MPP0_GE0_COL MPP(0, 0x1, 0, 0, 1) +#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 0, 1) #define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1) #define MPP1_GPIO MPP(1, 0x0, 1, 1, 1) -#define MPP1_GE0_RXERR MPP(1, 0x1, 1, 0, 1) -#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 1, 1) +#define MPP1_GE0_RXERR MPP(1, 0x1, 0, 0, 1) +#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 0, 1) #define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1) #define MPP2_GPIO MPP(2, 0x0, 1, 1, 1) -#define MPP2_GE0_CRS MPP(2, 0x1, 1, 0, 1) -#define MPP2_GE1_RXCTL MPP(2, 0x2, 1, 0, 1) +#define MPP2_GE0_CRS MPP(2, 0x1, 0, 0, 1) +#define MPP2_GE1_RXCTL MPP(2, 0x2, 0, 0, 1) #define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1) #define MPP3_GPIO MPP(3, 0x0, 1, 1, 1) -#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 1, 1) -#define MPP3_GE1_RXCLK MPP(3, 0x2, 1, 0, 1) +#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 0, 1) +#define MPP3_GE1_RXCLK MPP(3, 0x2, 0, 0, 1) #define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1) #define MPP4_GPIO MPP(4, 0x0, 1, 1, 1) -#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 1, 1) -#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 1, 1) +#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 0, 1) +#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 0, 1) #define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1) #define MPP5_GPIO MPP(5, 0x0, 1, 1, 1) -#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 1, 1) -#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 1, 1) +#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 0, 1) +#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 0, 1) #define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1) #define MPP6_GPIO MPP(6, 0x0, 1, 1, 1) -#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 1, 1) -#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 1, 1) +#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 0, 1) +#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 0, 1) #define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1) #define MPP7_GPIO MPP(7, 0x0, 1, 1, 1) -#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 1, 1) -#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 1, 1) +#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 0, 1) +#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 0, 1) #define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1) #define MPP8_GPIO MPP(8, 0x0, 1, 1, 1) -#define MPP8_GE0_RXD4 MPP(8, 0x1, 1, 0, 1) -#define MPP8_GE1_RXD0 MPP(8, 0x2, 1, 0, 1) +#define MPP8_GE0_RXD4 MPP(8, 0x1, 0, 0, 1) +#define MPP8_GE1_RXD0 MPP(8, 0x2, 0, 0, 1) #define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1) #define MPP9_GPIO MPP(9, 0x0, 1, 1, 1) -#define MPP9_GE0_RXD5 MPP(9, 0x1, 1, 0, 1) -#define MPP9_GE1_RXD1 MPP(9, 0x2, 1, 0, 1) +#define MPP9_GE0_RXD5 MPP(9, 0x1, 0, 0, 1) +#define MPP9_GE1_RXD1 MPP(9, 0x2, 0, 0, 1) #define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1) -#define MPP10_GE0_RXD6 MPP(10, 0x1, 1, 0, 1) -#define MPP10_GE1_RXD2 MPP(10, 0x2, 1, 0, 1) +#define MPP10_GE0_RXD6 MPP(10, 0x1, 0, 0, 1) +#define MPP10_GE1_RXD2 MPP(10, 0x2, 0, 0, 1) 
#define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1) #define MPP11_GPIO MPP(11, 0x0, 1, 1, 1) -#define MPP11_GE0_RXD7 MPP(11, 0x1, 1, 0, 1) -#define MPP11_GE1_RXD3 MPP(11, 0x2, 1, 0, 1) +#define MPP11_GE0_RXD7 MPP(11, 0x1, 0, 0, 1) +#define MPP11_GE1_RXD3 MPP(11, 0x2, 0, 0, 1) #define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1) #define MPP12_GPIO MPP(12, 0x0, 1, 1, 1) -#define MPP12_M_BB MPP(12, 0x3, 1, 0, 1) -#define MPP12_UA0_CTSn MPP(12, 0x4, 1, 0, 1) -#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 1, 1) -#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 1, 1) +#define MPP12_M_BB MPP(12, 0x3, 0, 0, 1) +#define MPP12_UA0_CTSn MPP(12, 0x4, 0, 0, 1) +#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 0, 1) +#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 0, 1) #define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1) #define MPP13_GPIO MPP(13, 0x0, 1, 1, 1) -#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 1, 1) -#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 1, 1) -#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 1, 1) -#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 1, 1) +#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 0, 1) +#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 0, 1) +#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 0, 1) +#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 0, 1) #define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1) #define MPP14_GPIO MPP(14, 0x0, 1, 1, 1) -#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 1, 1) -#define MPP14_UA1_CTSn MPP(14, 0x4, 1, 0, 1) -#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 1, 1) -#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 1, 1) +#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 0, 1) +#define MPP14_UA1_CTSn MPP(14, 0x4, 0, 0, 1) +#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 0, 1) +#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 0, 1) #define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1) #define MPP15_GPIO MPP(15, 0x0, 1, 1, 1) -#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 1, 1) -#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 1, 1) -#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 1, 1) -#define MPP15_TDM_SMISO MPP(15, 0x6, 1, 0, 1) +#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 0, 1) +#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 0, 1) +#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 0, 1) +#define MPP15_TDM_SMISO MPP(15, 0x6, 0, 0, 1) #define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1) #define MPP16_GPIO MPP(16, 0x0, 1, 1, 1) -#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 1, 1) -#define MPP16_UA2_TXD MPP(16, 0x4, 0, 1, 1) -#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 1, 1) -#define MPP16_TDM_INTn MPP(16, 0x6, 1, 0, 1) +#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 0, 1) +#define MPP16_UA2_TXD MPP(16, 0x4, 0, 0, 1) +#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 0, 1) +#define MPP16_TDM_INTn MPP(16, 0x6, 0, 0, 1) #define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1) #define MPP17_GPIO MPP(17, 0x0, 1, 1, 1) -#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 1, 1) -#define MPP17_UA2_RXD MPP(17, 0x4, 1, 0, 1) -#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 1, 1) -#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 1, 1) +#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 0, 1) +#define MPP17_UA2_RXD MPP(17, 0x4, 0, 0, 1) +#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 0, 1) +#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 0, 1) #define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1) #define MPP18_GPIO MPP(18, 0x0, 1, 1, 1) -#define MPP18_UA0_CTSn MPP(18, 0x4, 1, 0, 1) -#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 1, 1) +#define MPP18_UA0_CTSn MPP(18, 0x4, 0, 0, 1) +#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 0, 1) #define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1) #define MPP19_GPIO MPP(19, 0x0, 1, 1, 1) -#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 1, 1) -#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 1, 
1) +#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 0, 1) +#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 0, 1) #define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1) #define MPP20_GPIO MPP(20, 0x0, 1, 1, 1) -#define MPP20_UA1_CTSs MPP(20, 0x4, 1, 0, 1) -#define MPP20_TDM_PCLK MPP(20, 0x6, 1, 1, 0) +#define MPP20_UA1_CTSs MPP(20, 0x4, 0, 0, 1) +#define MPP20_TDM_PCLK MPP(20, 0x6, 0, 0, 0) #define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1) #define MPP21_GPIO MPP(21, 0x0, 1, 1, 1) -#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 1, 1) -#define MPP21_TDM_FSYNC MPP(21, 0x6, 1, 1, 0) +#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 0, 1) +#define MPP21_TDM_FSYNC MPP(21, 0x6, 0, 0, 0) #define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1) #define MPP22_GPIO MPP(22, 0x0, 1, 1, 1) -#define MPP22_UA3_TDX MPP(22, 0x4, 0, 1, 1) -#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 1, 1) -#define MPP22_TDM_DRX MPP(22, 0x6, 1, 0, 1) +#define MPP22_UA3_TDX MPP(22, 0x4, 0, 0, 1) +#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 0, 1) +#define MPP22_TDM_DRX MPP(22, 0x6, 0, 0, 1) #define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1) #define MPP23_GPIO MPP(23, 0x0, 1, 1, 1) -#define MPP23_UA3_RDX MPP(23, 0x4, 1, 0, 1) -#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 1, 1) -#define MPP23_TDM_DTX MPP(23, 0x6, 0, 1, 1) +#define MPP23_UA3_RDX MPP(23, 0x4, 0, 0, 1) +#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 0, 1) +#define MPP23_TDM_DTX MPP(23, 0x6, 0, 0, 1) #define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1) #define MPP24_GPIO MPP(24, 0x0, 1, 1, 1) -#define MPP24_UA2_TXD MPP(24, 0x4, 0, 1, 1) -#define MPP24_TDM_INTn MPP(24, 0x6, 1, 0, 1) +#define MPP24_UA2_TXD MPP(24, 0x4, 0, 0, 1) +#define MPP24_TDM_INTn MPP(24, 0x6, 0, 0, 1) #define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1) #define MPP25_GPIO MPP(25, 0x0, 1, 1, 1) -#define MPP25_UA2_RXD MPP(25, 0x4, 1, 0, 1) -#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 1, 1) +#define MPP25_UA2_RXD MPP(25, 0x4, 0, 0, 1) +#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 0, 1) #define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1) #define MPP26_GPIO MPP(26, 0x0, 1, 1, 1) -#define MPP26_UA2_CTSn MPP(26, 0x4, 1, 0, 1) -#define MPP26_TDM_PCLK MPP(26, 0x6, 1, 1, 1) +#define MPP26_UA2_CTSn MPP(26, 0x4, 0, 0, 1) +#define MPP26_TDM_PCLK MPP(26, 0x6, 0, 0, 1) #define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1) #define MPP27_GPIO MPP(27, 0x0, 1, 1, 1) -#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 1, 1) -#define MPP27_TDM_FSYNC MPP(27, 0x6, 1, 1, 1) +#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 0, 1) +#define MPP27_TDM_FSYNC MPP(27, 0x6, 0, 0, 1) #define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1) #define MPP28_GPIO MPP(28, 0x0, 1, 1, 1) -#define MPP28_UA3_TXD MPP(28, 0x4, 0, 1, 1) -#define MPP28_TDM_DRX MPP(28, 0x6, 1, 0, 1) +#define MPP28_UA3_TXD MPP(28, 0x4, 0, 0, 1) +#define MPP28_TDM_DRX MPP(28, 0x6, 0, 0, 1) #define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1) #define MPP29_GPIO MPP(29, 0x0, 1, 1, 1) -#define MPP29_UA3_RXD MPP(29, 0x4, 1, 0, 1) -#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 1, 1) -#define MPP29_TDM_DTX MPP(29, 0x6, 0, 1, 1) +#define MPP29_UA3_RXD MPP(29, 0x4, 0, 0, 1) +#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 0, 1) +#define MPP29_TDM_DTX MPP(29, 0x6, 0, 0, 1) #define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1) #define MPP30_GPIO MPP(30, 0x0, 1, 1, 1) -#define MPP30_UA3_CTSn MPP(30, 0x4, 1, 0, 1) +#define MPP30_UA3_CTSn MPP(30, 0x4, 0, 0, 1) #define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1) #define MPP31_GPIO MPP(31, 0x0, 1, 1, 1) -#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 1, 1) -#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 1, 1) +#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 0, 1) +#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 0, 1) #define MPP31_UNUSED MPP(31, 0x1, 
0, 0, 1) #define MPP32_GPIO MPP(32, 0x1, 1, 1, 1) -#define MPP32_UA3_TDX MPP(32, 0x4, 0, 1, 1) -#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 1, 1) -#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 1, 1) +#define MPP32_UA3_TDX MPP(32, 0x4, 0, 0, 1) +#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 0, 1) +#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 0, 1) #define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1) #define MPP33_GPIO MPP(33, 0x1, 1, 1, 1) -#define MPP33_UA3_RDX MPP(33, 0x4, 1, 0, 1) -#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 1, 1) +#define MPP33_UA3_RDX MPP(33, 0x4, 0, 0, 1) +#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 0, 1) #define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1) #define MPP34_GPIO MPP(34, 0x1, 1, 1, 1) -#define MPP34_UA2_TDX MPP(34, 0x4, 0, 1, 1) -#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 1, 1) +#define MPP34_UA2_TDX MPP(34, 0x4, 0, 0, 1) +#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 0, 1) #define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1) #define MPP35_GPIO MPP(35, 0x1, 1, 1, 1) -#define MPP35_UA2_RDX MPP(35, 0x4, 1, 0, 1) -#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 1, 1) +#define MPP35_UA2_RDX MPP(35, 0x4, 0, 0, 1) +#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 0, 1) #define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1) #define MPP36_GPIO MPP(36, 0x1, 1, 1, 1) -#define MPP36_UA0_CTSn MPP(36, 0x2, 1, 0, 1) -#define MPP36_UA2_TDX MPP(36, 0x4, 0, 1, 1) -#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 1, 1) +#define MPP36_UA0_CTSn MPP(36, 0x2, 0, 0, 1) +#define MPP36_UA2_TDX MPP(36, 0x4, 0, 0, 1) +#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 0, 1) #define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1) #define MPP37_GPIO MPP(37, 0x1, 1, 1, 1) -#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 1, 1) -#define MPP37_UA2_RXD MPP(37, 0x4, 1, 0, 1) -#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 1, 1) -#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 1, 1) +#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 0, 1) +#define MPP37_UA2_RXD MPP(37, 0x4, 0, 0, 1) +#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 0, 1) +#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 0, 1) #define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1) #define MPP38_GPIO MPP(38, 0x1, 1, 1, 1) -#define MPP38_UA1_CTSn MPP(38, 0x2, 1, 0, 1) -#define MPP38_UA3_TXD MPP(38, 0x4, 0, 1, 1) -#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 1, 1) -#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 1, 1) +#define MPP38_UA1_CTSn MPP(38, 0x2, 0, 0, 1) +#define MPP38_UA3_TXD MPP(38, 0x4, 0, 0, 1) +#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 0, 1) +#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 0, 1) #define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1) #define MPP39_GPIO MPP(39, 0x1, 1, 1, 1) -#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 1, 1) -#define MPP39_UA3_RXD MPP(39, 0x4, 1, 0, 1) -#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 1, 1) -#define MPP39_TDM_SMISO MPP(39, 0x6, 1, 0, 1) +#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 0, 1) +#define MPP39_UA3_RXD MPP(39, 0x4, 0, 0, 1) +#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 0, 1) +#define MPP39_TDM_SMISO MPP(39, 0x6, 0, 0, 1) #define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1) #define MPP40_GPIO MPP(40, 0x1, 1, 1, 1) -#define MPP40_TDM_INTn MPP(40, 0x6, 1, 0, 1) +#define MPP40_TDM_INTn MPP(40, 0x6, 0, 0, 1) #define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1) #define MPP41_GPIO MPP(41, 0x1, 1, 1, 1) -#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 1, 1) +#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 0, 1) #define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1) #define MPP42_GPIO MPP(42, 0x1, 1, 1, 1) -#define MPP42_TDM_PCLK MPP(42, 0x6, 1, 1, 1) +#define MPP42_TDM_PCLK MPP(42, 0x6, 0, 0, 1) #define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1) #define MPP43_GPIO MPP(43, 0x1, 1, 1, 1) -#define MPP43_TDM_FSYNC MPP(43, 0x6, 1, 1, 1) +#define MPP43_TDM_FSYNC MPP(43, 
0x6, 0, 0, 1) #define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1) #define MPP44_GPIO MPP(44, 0x1, 1, 1, 1) -#define MPP44_TDM_DRX MPP(44, 0x6, 1, 0, 1) +#define MPP44_TDM_DRX MPP(44, 0x6, 0, 0, 1) #define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1) #define MPP45_GPIO MPP(45, 0x1, 1, 1, 1) -#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 1, 1) -#define MPP45_TDM_DRX MPP(45, 0x6, 0, 1, 1) +#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 0, 1) +#define MPP45_TDM_DRX MPP(45, 0x6, 0, 0, 1) #define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1) #define MPP46_GPIO MPP(46, 0x1, 1, 1, 1) -#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 1, 1) +#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 0, 1) #define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1) @@ -323,14 +323,14 @@ #define MPP48_GPIO MPP(48, 0x1, 1, 1, 1) -#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 1, 1) +#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 0, 1) #define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1) #define MPP49_GPIO MPP(49, 0x1, 1, 1, 1) -#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 1, 1) -#define MPP49_M_BB MPP(49, 0x4, 1, 0, 1) +#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 0, 1) +#define MPP49_M_BB MPP(49, 0x4, 0, 0, 1) #define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1) diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 63de2d396e2..14a5971d0d4 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -49,8 +49,9 @@ #define ETH_KS8851_QUART 138 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184 #define OMAP4_SFH7741_ENABLE_GPIO 188 -#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ +#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ +#define HDMI_GPIO_HPD 63 /* Hotplug detect */ static const int sdp4430_keymap[] = { KEY(0, 0, KEY_E), @@ -578,12 +579,8 @@ static void __init omap_sfh7741prox_init(void) static void sdp4430_hdmi_mux_init(void) { - /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */ - omap_mux_init_signal("hdmi_hpd", - OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_cec", OMAP_PIN_INPUT_PULLUP); - /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */ omap_mux_init_signal("hdmi_ddc_scl", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_ddc_sda", @@ -591,8 +588,9 @@ static void sdp4430_hdmi_mux_init(void) } static struct gpio sdp4430_hdmi_gpios[] = { - { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, + { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, + { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" }, }; static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) @@ -609,26 +607,21 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev) { - gpio_free(HDMI_GPIO_LS_OE); - gpio_free(HDMI_GPIO_HPD); + gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios)); } +static struct omap_dss_hdmi_data sdp4430_hdmi_data = { + .hpd_gpio = HDMI_GPIO_HPD, +}; + static struct omap_dss_device sdp4430_hdmi_device = { .name = "hdmi", .driver_name = "hdmi_panel", .type = OMAP_DISPLAY_TYPE_HDMI, - .clocks = { - .dispc = { - .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK, - }, - .hdmi = { - .regn = 15, - .regm2 = 1, - }, - }, .platform_enable = sdp4430_panel_enable_hdmi, .platform_disable = sdp4430_panel_disable_hdmi, .channel = OMAP_DSS_CHANNEL_DIGIT, + .data = &sdp4430_hdmi_data, }; static struct omap_dss_device *sdp4430_dss_devices[] = { @@ -645,6 +638,10 @@ void omap_4430sdp_display_init(void) { sdp4430_hdmi_mux_init(); 
omap_display_init(&sdp4430_dss_data); + + omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN); } #ifdef CONFIG_OMAP_MUX diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 0cfe2005cb5..107dfc377a8 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -52,8 +52,9 @@ #define GPIO_HUB_NRESET 62 #define GPIO_WIFI_PMENA 43 #define GPIO_WIFI_IRQ 53 -#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */ +#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */ +#define HDMI_GPIO_HPD 63 /* Hotplug detect */ /* wl127x BT, FM, GPS connectivity chip */ static int wl1271_gpios[] = {46, -1, -1}; @@ -614,12 +615,8 @@ int __init omap4_panda_dvi_init(void) static void omap4_panda_hdmi_mux_init(void) { - /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */ - omap_mux_init_signal("hdmi_hpd", - OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_cec", OMAP_PIN_INPUT_PULLUP); - /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */ omap_mux_init_signal("hdmi_ddc_scl", OMAP_PIN_INPUT_PULLUP); omap_mux_init_signal("hdmi_ddc_sda", @@ -627,8 +624,9 @@ static void omap4_panda_hdmi_mux_init(void) } static struct gpio panda_hdmi_gpios[] = { - { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, + { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" }, { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, + { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" }, }; static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) @@ -645,10 +643,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev) { - gpio_free(HDMI_GPIO_LS_OE); - gpio_free(HDMI_GPIO_HPD); + gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios)); } +static struct omap_dss_hdmi_data omap4_panda_hdmi_data = { + .hpd_gpio = HDMI_GPIO_HPD, +}; + static struct omap_dss_device omap4_panda_hdmi_device = { .name = "hdmi", .driver_name = "hdmi_panel", @@ -656,6 +657,7 @@ static struct omap_dss_device omap4_panda_hdmi_device = { .platform_enable = omap4_panda_panel_enable_hdmi, .platform_disable = omap4_panda_panel_disable_hdmi, .channel = OMAP_DSS_CHANNEL_DIGIT, + .data = &omap4_panda_hdmi_data, }; static struct omap_dss_device *omap4_panda_dss_devices[] = { @@ -679,6 +681,10 @@ void omap4_panda_display_init(void) omap4_panda_hdmi_mux_init(); omap_display_init(&omap4_panda_dss_data); + + omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT); + omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN); } static void __init omap4_panda_init(void) diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index c56597172bf..9a1e1f7c109 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -56,11 +56,11 @@ #define RX51_USB_TRANSCEIVER_RST_GPIO 67 -/* list all spi devices here */ +/* List all SPI devices here. Note that the list/probe order seems to matter! 
*/ enum { RX51_SPI_WL1251, - RX51_SPI_MIPID, /* LCD panel */ RX51_SPI_TSC2005, /* Touch Controller */ + RX51_SPI_MIPID, /* LCD panel */ }; static struct wl12xx_platform_data wl1251_pdata; diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 130034bf01d..dfffbbf4c00 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval) case GPMC_CONFIG_DEV_SIZE: regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); + + /* clear 2 target bits */ + regval &= ~GPMC_CONFIG1_DEVICESIZE(3); + + /* set the proper value */ regval |= GPMC_CONFIG1_DEVICESIZE(wval); + gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval); break; diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c index ab8b35b780b..06274948347 100644 --- a/arch/arm/mach-omap2/opp.c +++ b/arch/arm/mach-omap2/opp.c @@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def, omap_table_init = 1; /* Lets now register with OPP library */ - for (i = 0; i < opp_def_size; i++) { + for (i = 0; i < opp_def_size; i++, opp_def++) { struct omap_hwmod *oh; struct device *dev; @@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def, __func__, opp_def->freq, opp_def->hwmod_name, i, r); } - opp_def++; } return 0; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 0ab531d047f..8a98da0b3f8 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "common.h" @@ -72,7 +73,8 @@ void __init orion5x_map_io(void) void __init orion5x_ehci0_init(void) { orion_ehci_init(&orion5x_mbus_dram_info, - ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL); + ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL, + EHCI_PHY_ORION); } diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h index eac68978a2c..db70e79a119 100644 --- a/arch/arm/mach-orion5x/mpp.h +++ b/arch/arm/mach-orion5x/mpp.h @@ -65,8 +65,8 @@ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1) +#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h index 654adc90c9a..301bf0eefe1 100644 --- a/arch/arm/mach-pxa/include/mach/smemc.h +++ b/arch/arm/mach-pxa/include/mach/smemc.h @@ -37,6 +37,7 @@ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */ +#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */ /* * More handy macros for PCMCIA diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c index 79923058d10..f38aa890b2c 100644 --- a/arch/arm/mach-pxa/smemc.c +++ b/arch/arm/mach-pxa/smemc.c @@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void) __raw_writel(csadrcfg[1], CSADRCFG1); __raw_writel(csadrcfg[2], CSADRCFG2); __raw_writel(csadrcfg[3], CSADRCFG3); + /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ + __raw_writel(0x2, CSMSADRCFG); } static struct syscore_ops smemc_syscore_ops = { @@ -49,8 +51,19 @@ static struct syscore_ops 
smemc_syscore_ops = { static int __init smemc_init(void) { - if (cpu_is_pxa3xx()) + if (cpu_is_pxa3xx()) { + /* + * The only documentation we have on the + * Chip Select Configuration Register (CSMSADRCFG) is that + * it must be programmed to 0x2. + * Moreover, in the bit definitions, the second bit + * (CSMSADRCFG[1]) is called "SETALWAYS". + * Other bits are reserved in this register. + */ + __raw_writel(0x2, CSMSADRCFG); + register_syscore_ops(&smemc_syscore_ops); + } return 0; } diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 9a9706cf149..6ebdb0d0382 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -7,6 +7,7 @@ config UX500_SOC_COMMON select HAS_MTU select ARM_ERRATA_753970 select ARM_ERRATA_754322 + select ARM_ERRATA_764369 menu "Ux500 SoC" diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b676c96ae56..824e6058a85 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -721,7 +721,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) unsigned long instr = 0, instrptr; int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); unsigned int type; - mm_segment_t fs; unsigned int fault; u16 tinstr = 0; int isize = 4; @@ -731,16 +730,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) instrptr = instruction_pointer(regs); - fs = get_fs(); - set_fs(KERNEL_DS); if (thumb_mode(regs)) { - fault = __get_user(tinstr, (u16 *)(instrptr & ~1)); + u16 *ptr = (u16 *)(instrptr & ~1); + fault = probe_kernel_address(ptr, tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ u16 tinst2 = 0; - fault = __get_user(tinst2, (u16 *)(instrptr+2)); + fault = probe_kernel_address(ptr + 1, tinst2); instr = (tinstr << 16) | tinst2; thumb2_32b = 1; } else { @@ -749,8 +747,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } } } else - fault = __get_user(instr, (u32 *)instrptr); - set_fs(fs); + fault = probe_kernel_address(instrptr, instr); if (fault) { type = TYPE_FAULT; diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index e0b0e7a4ec6..09f885161b3 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -342,6 +342,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 5cd1b0530f5..295baef75e0 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -54,9 +54,15 @@ loop1: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache +#ifdef CONFIG_PREEMPT + save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic +#endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr +#ifdef CONFIG_PREEMPT + restore_irqs_notrace r9 +#endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) ldr r4, =0x3ff @@ -205,6 +211,9 @@ ENTRY(v7_coherent_user_range) * isn't mapped, just try the next page. 
*/ 9001: +#ifdef CONFIG_ARM_ERRATA_775420 + dsb +#endif mov r12, r12, lsr #12 mov r12, r12, lsl #12 add r12, r12, #4096 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 340c2d06993..875114ff648 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -472,25 +472,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) { + unsigned long pfn; + size_t left = size; + + pfn = page_to_pfn(page) + offset / PAGE_SIZE; + offset %= PAGE_SIZE; + /* * A single sg entry may refer to multiple physically contiguous * pages. But we still need to process highmem pages individually. * If highmem is not configured then the bulk of this loop gets * optimized out. */ - size_t left = size; do { size_t len = left; void *vaddr; + page = pfn_to_page(pfn); + if (PageHighMem(page)) { - if (len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; - } + if (len + offset > PAGE_SIZE) len = PAGE_SIZE - offset; - } vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; @@ -507,7 +509,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, op(vaddr, len, dir); } offset = 0; - page++; + pfn++; left -= len; } while (left); } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 7475c2e1629..8d43285ca84 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -275,7 +275,9 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, return fault; check_stack: - if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) + /* Don't allow expansion below FIRST_USER_ADDRESS */ + if (vma->vm_flags & VM_GROWSDOWN && + addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr)) goto good_area; out: return fault; diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1a8d4aa821b..8fda9f70320 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -236,8 +236,6 @@ void __sync_icache_dcache(pte_t pteval) struct page *page; struct address_space *mapping; - if (!pte_present_user(pteval)) - return; if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 0f79b76c912..7fcfb483b16 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -496,7 +496,7 @@ static void __init build_mem_type_table(void) } for (i = 0; i < 16; i++) { - unsigned long v = pgprot_val(protection_map[i]); + pteval_t v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 59430058fc3..7d2592f1f91 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -287,10 +287,6 @@ cpu_resume_l1_flags: * Initialise TLB, Caches, and MMU state ready to switch the MMU * on. Return in r0 the new CP15 C1 control register setting. * - * We automatically detect if we have a Harvard cache, and use the - * Harvard cache control instructions insead of the unified cache - * control instructions. - * * This should be able to cover all ARMv7 cores. 
* * It is assumed that: @@ -372,9 +368,7 @@ __v7_setup: mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register #endif #ifdef CONFIG_ARM_ERRATA_743622 - teq r6, #0x20 @ present in r2p0 - teqne r6, #0x21 @ present in r2p1 - teqne r6, #0x22 @ present in r2p2 + teq r5, #0x00200000 @ only present in r2p* mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register orreq r10, r10, #1 << 6 @ set bit #6 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register @@ -387,9 +381,7 @@ __v7_setup: #endif 3: mov r10, #0 -#ifdef HARVARD_CACHE mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate -#endif dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs @@ -432,6 +424,18 @@ __v7_setup: ldr r6, =NMRR @ NMRR mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR +#endif +#ifndef CONFIG_ARM_THUMBEE + mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE + and r0, r0, #(0xf << 12) @ ThumbEE enabled field + teq r0, #(1 << 12) @ check if ThumbEE is present + bne 1f + mov r5, #0 + mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0 + mrc p14, 6, r0, c0, c0, 0 @ load TEECR + orr r0, r0, #1 @ set the 1st bit in order to + mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access +1: #endif adr r5, v7_crval ldmia r5, {r5, r6} diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 9e5451b3c8e..214b002556e 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -343,7 +343,7 @@ static struct resource orion_ge10_shared_resources[] = { static struct platform_device orion_ge10_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 1, + .id = 2, .dev = { .platform_data = &orion_ge10_shared_data, }, @@ -358,8 +358,8 @@ static struct resource orion_ge10_resources[] = { static struct platform_device orion_ge10 = { .name = MV643XX_ETH_NAME, - .id = 1, - .num_resources = 2, + .id = 2, + .num_resources = 1, .resource = orion_ge10_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), @@ -397,7 +397,7 @@ static struct resource orion_ge11_shared_resources[] = { static struct platform_device orion_ge11_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 1, + .id = 3, .dev = { .platform_data = &orion_ge11_shared_data, }, @@ -412,8 +412,8 @@ static struct resource orion_ge11_resources[] = { static struct platform_device orion_ge11 = { .name = MV643XX_ETH_NAME, - .id = 1, - .num_resources = 2, + .id = 3, + .num_resources = 1, .resource = orion_ge11_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), @@ -806,10 +806,7 @@ void __init orion_xor1_init(unsigned long mapbase_low, /***************************************************************************** * EHCI ****************************************************************************/ -static struct orion_ehci_data orion_ehci_data = { - .phy_version = EHCI_PHY_NA, -}; - +static struct orion_ehci_data orion_ehci_data; static u64 ehci_dmamask = DMA_BIT_MASK(32); @@ -830,9 +827,11 @@ static struct platform_device orion_ehci = { void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info, unsigned long mapbase, - unsigned long irq) + unsigned long irq, + enum orion_ehci_phy_ver phy_version) { orion_ehci_data.dram = mbus_dram_info; + orion_ehci_data.phy_version = phy_version; fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1, irq); diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h index a63c357e2ab..a2c0e31ce0d 100644 --- a/arch/arm/plat-orion/include/plat/common.h +++ b/arch/arm/plat-orion/include/plat/common.h 
@@ -95,7 +95,8 @@ void __init orion_xor1_init(unsigned long mapbase_low, void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info, unsigned long mapbase, - unsigned long irq); + unsigned long irq, + enum orion_ehci_phy_ver phy_version); void __init orion_ehci_1_init(struct mbus_dram_target_info *mbus_dram_info, unsigned long mapbase, diff --git a/arch/arm/plat-orion/mpp.c b/arch/arm/plat-orion/mpp.c index 91553432711..3b1e17bd3d1 100644 --- a/arch/arm/plat-orion/mpp.c +++ b/arch/arm/plat-orion/mpp.c @@ -64,8 +64,7 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask, gpio_mode |= GPIO_INPUT_OK; if (*mpp_list & MPP_OUTPUT_MASK) gpio_mode |= GPIO_OUTPUT_OK; - if (sel != 0) - gpio_mode = 0; + orion_gpio_set_valid(num, gpio_mode); } diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 539bd0e3def..9f422ba5b1d 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -431,7 +431,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan) * when necessary. */ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1249,7 +1249,7 @@ static void s3c2410_dma_resume(void) struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; int channel; - for (channel = dma_channels - 1; channel >= 0; cp++, channel--) + for (channel = dma_channels - 1; channel >= 0; cp--, channel--) s3c2410_dma_resume_chan(cp); } diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c index e8f2be2d67f..df14954aa1c 100644 --- a/arch/arm/plat-samsung/adc.c +++ b/arch/arm/plat-samsung/adc.c @@ -143,11 +143,13 @@ int s3c_adc_start(struct s3c_adc_client *client, return -EINVAL; } - if (client->is_ts && adc->ts_pend) - return -EAGAIN; - spin_lock_irqsave(&adc->lock, flags); + if (client->is_ts && adc->ts_pend) { + spin_unlock_irqrestore(&adc->lock, flags); + return -EAGAIN; + } + client->channel = channel; client->nr_samples = nr_samples; diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index dcce3f6aef8..7f74bb3d8b7 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -369,7 +369,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) * If there isn't a second FP instruction, exit now. Note that * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1. */ - if (fpexc ^ (FPEXC_EX | FPEXC_FP2V)) + if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V)) goto exit; /* @@ -605,11 +605,14 @@ static int __init vfp_init(void) elf_hwcap |= HWCAP_VFPv3; /* - * Check for VFPv3 D16. CPUs in this configuration - * only have 16 x 64bit registers. + * Check for VFPv3 D16 and VFPv4 D16. CPUs in + * this configuration only have 16 x 64bit + * registers. 
*/ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1) - elf_hwcap |= HWCAP_VFPv3D16; + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */ + else + elf_hwcap |= HWCAP_VFPD32; } #endif /* diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index e9d689b7c83..c614484f0fc 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -8,6 +8,7 @@ config AVR32 select HAVE_KPROBES select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_PROBE + select GENERIC_ATOMIC64 select HARDIRQS_SW_RESEND select GENERIC_IRQ_SHOW help diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h index 8790dfc10d5..e6952a01c5d 100644 --- a/arch/avr32/include/asm/signal.h +++ b/arch/avr32/include/asm/signal.h @@ -128,6 +128,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index a727f54d64d..9c266abc884 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c @@ -271,7 +271,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, break; case R_AVR32_GOT18SW: if ((relocation & 0xfffe0003) != 0 - && (relocation & 0xfffc0003) != 0xffff0000) + && (relocation & 0xfffc0000) != 0xfffc0000) return reloc_overflow(module, "R_AVR32_GOT18SW", relocation); relocation >>= 2; diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index 32567bc2a42..ac12ae2b928 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h @@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr) #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0) #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0) #define insl(port,addr,count) (cris_iops ? 
cris_iops->read_io(port,addr,4,count) : 0) -#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1) -#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1) -#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1) -#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count) -#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count) -#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count) +static inline void outb(unsigned char data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 1, 1); +} +static inline void outw(unsigned short data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 2, 1); +} +static inline void outl(unsigned int data, unsigned int port) +{ + if (cris_iops) + cris_iops->write_io(port, (void *) &data, 4, 1); +} +static inline void outsb(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 1, count); +} +static inline void outsw(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 2, count); +} +static inline void outsl(unsigned int port, const void *addr, + unsigned long count) +{ + if (cris_iops) + cris_iops->write_io(port, (void *)addr, 4, count); +} /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h index ea6af9aad76..057fea2db9e 100644 --- a/arch/cris/include/asm/signal.h +++ b/arch/cris/include/asm/signal.h @@ -122,6 +122,7 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h index fd8b66e40dc..86957072d16 100644 --- a/arch/h8300/include/asm/signal.h +++ b/arch/h8300/include/asm/signal.h @@ -121,6 +121,7 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 44688143967..6fcc9a081e4 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -18,8 +18,8 @@ #include -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index 8428525ddb2..1bd14d5c478 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -111,11 +111,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, unsigned long prev; __asm__ __volatile__( " mf;; \n" - " mov ar.ccv=%3;; \n" - "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" + " mov ar.ccv=%4;; \n" + "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. 
\n" "[2:]" - : "=r" (prev) + : "+r" (r8), "=&r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 43f96ab18fa..8c709616871 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; extern int cpe_vector; extern int ia64_cpe_irq; extern void ia64_mca_init(void); +extern void ia64_mca_irq_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 7c928da35b1..d8de1825b73 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -321,11 +321,12 @@ #define __NR_syncfs 1329 #define __NR_setns 1330 #define __NR_sendmmsg 1331 +#define __NR_accept4 1334 #ifdef __KERNEL__ -#define NR_syscalls 308 /* length of syscall table */ +#define NR_syscalls 311 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 3be485a300b..f19de9f7f5f 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN]; static struct acpi_table_slit __initdata *slit_table; cpumask_t early_cpu_possible_map = CPU_MASK_NONE; -static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) +static int __init +get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) { int pxm; pxm = pa->proximity_domain_lo; - if (ia64_platform_is("sn2")) + if (ia64_platform_is("sn2") || acpi_srat_revision >= 2) pxm += pa->proximity_domain_hi[0] << 8; return pxm; } -static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) +static int __init +get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) { int pxm; pxm = ma->proximity_domain; - if (!ia64_platform_is("sn2")) + if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1) pxm &= 0xff; return pxm; diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 97dd2abdeb1..df477f8c9d8 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1777,6 +1777,9 @@ sys_call_table: data8 sys_syncfs data8 sys_setns // 1330 data8 sys_sendmmsg + data8 sys_ni_syscall /* process_vm_readv */ + data8 sys_ni_syscall /* process_vm_writev */ + data8 sys_accept4 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index ad69606613e..f2c41828113 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,6 +23,8 @@ #include #include +#include + /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. 
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask) #endif /* CONFIG_SMP */ +int __init arch_early_irq_init(void) +{ + ia64_mca_irq_init(); + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 782c3a357f2..3540c5e8042 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -23,7 +23,6 @@ #include #include #include -#include /* for rand_initialize_irq() */ #include #include #include diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 84fb405eee8..9b97303b8c4 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2071,22 +2071,16 @@ ia64_mca_init(void) printk(KERN_INFO "MCA related initialization done\n"); } + /* - * ia64_mca_late_init - * - * Opportunity to setup things that require initialization later - * than ia64_mca_init. Setup a timer to poll for CPEs if the - * platform doesn't support an interrupt driven mechanism. - * - * Inputs : None - * Outputs : Status + * These pieces cannot be done in ia64_mca_init() because it is called before + * early_irq_init() which would wipe out our percpu irq registrations. But we + * cannot leave them until ia64_mca_late_init() because by then all the other + * processors have been brought online and have set their own CMC vectors to + * point at a non-existant action. Called from arch_early_irq_init(). */ -static int __init -ia64_mca_late_init(void) +void __init ia64_mca_irq_init(void) { - if (!mca_init) - return 0; - /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2105,6 +2099,23 @@ ia64_mca_late_init(void) /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif +} + +/* + * ia64_mca_late_init + * + * Opportunity to setup things that require initialization later + * than ia64_mca_init. Setup a timer to poll for CPEs if the + * platform doesn't support an interrupt driven mechanism. 
+ * + * Inputs : None + * Outputs : Status + */ +static int __init +ia64_mca_late_init(void) +{ + if (!mca_init) + return 0; register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 8213efe1998..a874213fbc9 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1168,6 +1168,11 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data) #define PALE_RESET_ENTRY 0x80000000ffffffb0UL +bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ + return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); +} + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct kvm_vcpu *v; diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 4332f7ee520..a7869f8f49a 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) "srlz.d;;" "ssm psr.i;;" "srlz.d;;" - : "=r"(ret) : "r"(iha), "r"(pte):"memory"); + : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); return ret; } diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h index b2eeb0de1c8..802d5616e8e 100644 --- a/arch/m32r/include/asm/signal.h +++ b/arch/m32r/include/asm/signal.h @@ -123,6 +123,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h index 73b8c8fbed9..bdace4bb29d 100644 --- a/arch/m68k/include/asm/entry_mm.h +++ b/arch/m68k/include/asm/entry_mm.h @@ -35,8 +35,8 @@ /* the following macro is used when enabling interrupts */ #if defined(MACH_ATARI_ONLY) - /* block out HSYNC on the atari */ -#define ALLOWINT (~0x400) + /* block out HSYNC = ipl 2 on the atari */ +#define ALLOWINT (~0x500) #define MAX_NOINT_IPL 3 #else /* portable version */ diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h index 5bc09c787a1..ee80858e0c8 100644 --- a/arch/m68k/include/asm/signal.h +++ b/arch/m68k/include/asm/signal.h @@ -119,6 +119,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; @@ -156,7 +157,7 @@ typedef struct sigaltstack { static inline void sigaddset(sigset_t *set, int _sig) { asm ("bfset %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -164,7 +165,7 @@ static inline void sigaddset(sigset_t *set, int _sig) static inline void sigdelset(sigset_t *set, int _sig) { asm ("bfclr %0{%1,#1}" - : "+od" (*set) + : "+o" (*set) : "id" ((_sig - 1) ^ 31) : "cc"); } @@ -180,7 +181,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) int ret; asm ("bfextu %1{%2,#1},%0" : "=d" (ret) - : "od" (*set), "id" ((_sig-1) ^ 31) + : "o" (*set), "id" ((_sig-1) ^ 31) : "cc"); return ret; } diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index 8623f8dc16f..9a5932ec368 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, goto bad_access; } - mem_value = *mem; + /* + * No need to check for EFAULT; we know that the page is + * present and writable. 
+ */ + __get_user(mem_value, mem); if (mem_value == oldval) - *mem = newval; + __put_user(newval, mem); pte_unmap_unlock(pte, ptl); up_read(&mm->mmap_sem); diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index c247de02bc7..1918d76aa06 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -950,6 +950,9 @@ int __init mac_platform_init(void) { u8 *swim_base; + if (!MACH_IS_MAC) + return -ENODEV; + /* * Serial devices */ diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 884819cd060..9aa60f64012 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -236,7 +236,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)" LDFLAGS += -m $(ld-emul) ifdef CONFIG_MIPS -CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \ +CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ sed -e 's/^\#define /-D/' -e "s/ /='/" -e "s/$$/'/") ifdef CONFIG_64BIT diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 97f8bf6639e..adda036bba1 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { register struct thread_info *__current_thread_info __asm__("$28"); #define current_thread_info() __current_thread_info +#endif /* !__ASSEMBLY__ */ + /* thread information allocation */ #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT) #define THREAD_SIZE_ORDER (1) @@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28"); #define free_thread_info(info) kfree(info) -#endif /* !__ASSEMBLY__ */ - #define PREEMPT_ACTIVE 0x10000000 /* diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 83bba332bbf..8b3c62c66eb 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -100,7 +100,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o obj-$(CONFIG_OF) += prom.o -CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) +CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index f4546e97c60..23817a6e32b 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; +#ifdef CONFIG_KPROBES + /* + * Return immediately if the kprobes fault notifier has set + * DIE_PAGE_FAULT. + */ + if (cmd == DIE_PAGE_FAULT) + return NOTIFY_DONE; +#endif /* CONFIG_KPROBES */ + /* Userspace events, ignore. */ if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index a81176f44c7..be281c69f6c 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -1,5 +1,6 @@ #include #include +#include #include #undef mips @@ -73,7 +74,7 @@ SECTIONS .data : { /* Data */ . = . 
+ DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ - INIT_TASK_DATA(PAGE_SIZE) + INIT_TASK_DATA(THREAD_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile index 7120282bf0d..3eb4a52ff9a 100644 --- a/arch/mn10300/Makefile +++ b/arch/mn10300/Makefile @@ -26,7 +26,7 @@ CHECKFLAGS += PROCESSOR := unset UNIT := unset -KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33 +KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,) KBUILD_AFLAGS += -mam33 -DCPU=AM33 ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y) diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index 1865d72a86f..eecaa7693ef 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h @@ -131,6 +131,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 26fd1146dda..3706cf04100 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -248,7 +248,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -257,7 +257,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC64_INIT(i) { (i) } static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 22dadeb5869..9d35a3eac24 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -12,11 +12,10 @@ #include #include +#include #include #include -struct vm_area_struct; - /* * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel * memory. For the return value to be meaningful, ADDR must be >= @@ -40,7 +39,14 @@ struct vm_area_struct; do{ \ *(pteptr) = (pteval); \ } while(0) -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +extern void purge_tlb_entries(struct mm_struct *, unsigned long); + +#define set_pte_at(mm, addr, ptep, pteval) \ + do { \ + set_pte(ptep, pteval); \ + purge_tlb_entries(mm, addr); \ + } while (0) #endif /* !__ASSEMBLY__ */ @@ -464,6 +470,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, old = pte_val(*ptep); new = pte_val(pte_wrprotect(__pte (old))); } while (cmpxchg((unsigned long *) ptep, old, new) != old); + purge_tlb_entries(mm, addr); #else pte_t old_pte = *ptep; set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h index c5edc60c059..1ee7c82672c 100644 --- a/arch/parisc/include/asm/prefetch.h +++ b/arch/parisc/include/asm/prefetch.h @@ -21,7 +21,12 @@ #define ARCH_HAS_PREFETCH static inline void prefetch(const void *addr) { - __asm__("ldw 0(%0), %%r0" : : "r" (addr)); + __asm__( +#ifndef CONFIG_PA20 + /* Need to avoid prefetch of NULL on PA7300LC */ + " extrw,u,= %0,31,32,%%r0\n" +#endif + " ldw 0(%0), %%r0" : : "r" (addr)); } /* LDD is a PA2.0 addition. 
*/ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 83335f3da5f..5241698ede6 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -421,6 +421,24 @@ void kunmap_parisc(void *addr) EXPORT_SYMBOL(kunmap_parisc); #endif +void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags; + + /* Note: purge_tlb_entries can be called at startup with + no context. */ + + /* Disable preemption while we play with %sr1. */ + preempt_disable(); + mtsp(mm->context, 1); + purge_tlb_start(flags); + pdtlb(addr); + pitlb(addr); + purge_tlb_end(flags); + preempt_enable(); +} +EXPORT_SYMBOL(purge_tlb_entries); + void __flush_tlb_range(unsigned long sid, unsigned long start, unsigned long end) { diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 6f059443914..07ef351edd5 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -552,7 +552,7 @@ * entry (identifying the physical page) and %r23 up with * the from tlb entry (or nothing if only a to entry---for * clear_user_page_asm) */ - .macro do_alias spc,tmp,tmp1,va,pte,prot,fault + .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype cmpib,COND(<>),n 0,\spc,\fault ldil L%(TMPALIAS_MAP_START),\tmp #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) @@ -581,7 +581,15 @@ */ cmpiclr,= 0x01,\tmp,%r0 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot +.ifc \patype,20 depd,z \prot,8,7,\prot +.else +.ifc \patype,11 + depw,z \prot,8,7,\prot +.else + .error "undefined PA type to do_alias" +.endif +.endif /* * OK, it is in the temp alias region, check whether "from" or "to". * Check "subtle" note in pacache.S re: r23/r26. @@ -1185,7 +1193,7 @@ dtlb_miss_20w: nop dtlb_check_alias_20w: - do_alias spc,t0,t1,va,pte,prot,dtlb_fault + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 idtlbt pte,prot @@ -1209,7 +1217,7 @@ nadtlb_miss_20w: nop nadtlb_check_alias_20w: - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 idtlbt pte,prot @@ -1241,7 +1249,7 @@ dtlb_miss_11: nop dtlb_check_alias_11: - do_alias spc,t0,t1,va,pte,prot,dtlb_fault + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11 idtlba pte,(va) idtlbp prot,(va) @@ -1273,7 +1281,7 @@ nadtlb_miss_11: nop nadtlb_check_alias_11: - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11 idtlba pte,(va) idtlbp prot,(va) @@ -1300,7 +1308,7 @@ dtlb_miss_20: nop dtlb_check_alias_20: - do_alias spc,t0,t1,va,pte,prot,dtlb_fault + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20 idtlbt pte,prot @@ -1326,7 +1334,7 @@ nadtlb_miss_20: nop nadtlb_check_alias_20: - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20 idtlbt pte,prot @@ -1453,7 +1461,7 @@ naitlb_miss_20w: nop naitlb_check_alias_20w: - do_alias spc,t0,t1,va,pte,prot,naitlb_fault + do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 iitlbt pte,prot @@ -1507,7 +1515,7 @@ naitlb_miss_11: nop naitlb_check_alias_11: - do_alias spc,t0,t1,va,pte,prot,itlb_fault + do_alias spc,t0,t1,va,pte,prot,itlb_fault,11 iitlba pte,(%sr0, va) iitlbp prot,(%sr0, va) @@ -1553,7 +1561,7 @@ naitlb_miss_20: nop naitlb_check_alias_20: - do_alias spc,t0,t1,va,pte,prot,naitlb_fault + do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20 iitlbt pte,prot diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 93ff3d90edd..5d7218ad885 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -692,7 +692,7 @@ 
ENTRY(flush_icache_page_asm) /* Purge any old translation */ - pitlb (%sr0,%r28) + pitlb (%sr4,%r28) ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r1 @@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm) sub %r25, %r1, %r25 -1: fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) - fic,m %r1(%r28) + /* fic only has the type 26 form on PA1.1, requiring an + * explicit space specification, so use %sr4 */ +1: fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) + fic,m %r1(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r1(%r28) + fic,m %r1(%sr4,%r28) sync bv %r0(%r2) - pitlb (%sr0,%r25) + pitlb (%sr4,%r25) .exit .procend diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index e1413243076..d0ea054bd3d 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) { compat_sigset_t s; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; sigset_64to32(&s, set); return copy_to_user(up, &s, sizeof s); @@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) compat_sigset_t s; int r; - if (sz != sizeof *set) panic("put_sigset32()"); + if (sz != sizeof *set) + return -EINVAL; if ((r = copy_from_user(&s, up, sz)) == 0) { sigset_32to64(set, &s); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index c9b932260f4..7ea75d14aa6 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, struct vm_area_struct *vma; int offset = mapping ? get_offset(mapping) : 0; + offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; + addr = DCACHE_ALIGN(addr - offset) + offset; for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index fa6f2b8163e..64a999882e4 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -50,8 +50,10 @@ SECTIONS . 
= KERNEL_BINARY_TEXT_START; _text = .; /* Text and read-only data */ - .text ALIGN(16) : { + .head ALIGN(16) : { HEAD_TEXT + } = 0 + .text ALIGN(16) : { TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -65,7 +67,7 @@ SECTIONS *(.fixup) *(.lock.text) /* out-of-line lock text */ *(.gnu.warning) - } = 0 + } /* End of text section */ _etext = .; diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 1cf20bdfbec..33a35801f7c 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct) /* * Convert cputime <-> microseconds */ -extern u64 __cputime_msec_factor; +extern u64 __cputime_usec_factor; static inline unsigned long cputime_to_usecs(const cputime_t ct) { - return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; + return mulhdu(ct, __cputime_usec_factor); } static inline cputime_t usecs_to_cputime(const unsigned long us) @@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us) sec = us / 1000000; if (ct) { ct *= tb_ticks_per_sec; - do_div(ct, 1000); + do_div(ct, 1000000); } if (sec) ct += (cputime_t) sec * tb_ticks_per_sec; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c5cae0dd176..764e99c750d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1000,7 +1000,8 @@ /* Macros for setting and retrieving special purpose registers */ #ifndef __ASSEMBLY__ #define mfmsr() ({unsigned long rval; \ - asm volatile("mfmsr %0" : "=r" (rval)); rval;}) + asm volatile("mfmsr %0" : "=r" (rval) : \ + : "memory"); rval;}) #ifdef CONFIG_PPC_BOOK3S_64 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ : : "r" (v) : "memory") diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index 3eb13be11d8..ec63a0a4e2c 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -109,6 +109,7 @@ struct sigaction { __sigrestore_t sa_restorer; sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index fe6f7c2c9c6..bc3c745cb90 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); extern void secondary_cpu_time_init(void); extern void iSeries_time_init_early(void); +extern void decrementer_check_overflow(void); + #endif /* __KERNEL__ */ #endif /* __POWERPC_TIME_H */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 36e1c8a29be..4c12a751bea 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -75,6 +75,7 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); + DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9fb933248ab..421c9a0ec87 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -268,7 +268,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_features = CPU_FTRS_PPC970, .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTR_HPTE_TABLE, + 
.mmu_features = MMU_FTRS_PPC970, .icache_bsize = 128, .dcache_bsize = 128, .num_pmcs = 8, diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index d834425186a..654fc535ed0 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -380,6 +380,12 @@ _GLOBAL(ret_from_fork) li r3,0 b syscall_exit + .section ".toc","aw" +DSCR_DEFAULT: + .tc dscr_default[TC],dscr_default + + .section ".text" + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state @@ -519,9 +525,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) - ld r6,_CCR(r1) - mtcrf 0xFF,r6 - #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -530,14 +533,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION + lwz r6,THREAD_DSCR_INHERIT(r4) + ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) - cmpd r0,r25 - beq 1f + cmpwi r6,0 + bne 1f + ld r0,0(r7) +1: cmpd r0,r25 + beq 2f mtspr SPRN_DSCR,r0 -1: +2: END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #endif + ld r6,_CCR(r1) + mtcrf 0xFF,r6 + /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) REST_10GPRS(22, r1) diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index bf99cfa6bbf..63240081133 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod, /* * On PPC32 the trampoline looks like: - * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha - * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l - * 0x7d, 0x69, 0x03, 0xa6 mtctr r11 + * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha + * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l + * 0x7d, 0x89, 0x03, 0xa6 mtctr r12 * 0x4e, 0x80, 0x04, 0x20 bctr */ @@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod, pr_devel(" %08x %08x ", jmp[0], jmp[1]); /* verify that this is what we expect it to be */ - if (((jmp[0] & 0xffff0000) != 0x3d600000) || - ((jmp[1] & 0xffff0000) != 0x396b0000) || - (jmp[2] != 0x7d6903a6) || + if (((jmp[0] & 0xffff0000) != 0x3d800000) || + ((jmp[1] & 0xffff0000) != 0x398c0000) || + (jmp[2] != 0x7d8903a6) || (jmp[3] != 0x4e800420)) { printk(KERN_ERR "Not a trampoline\n"); return -EINVAL; diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index ba504099844..a5031c35aa2 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -425,7 +425,7 @@ _STATIC(__after_prom_start) tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ #endif -#ifdef CONFIG_CRASH_DUMP +#ifdef CONFIG_RELOCATABLE /* * Check if the kernel has to be running as relocatable kernel based on the * variable __run_at_load, if it is set the kernel is treated as relocatable @@ -492,6 +492,7 @@ _GLOBAL(copy_and_flush) sync addi r5,r5,8 addi r6,r6,8 + isync blr .align 8 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5b428e30866..ca2987d939f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -170,16 +170,13 @@ notrace void arch_local_irq_restore(unsigned long en) */ local_paca->hard_enabled = en; -#ifndef CONFIG_BOOKE - /* On server, re-trigger the decrementer if it went negative since - * some processors only trigger on edge transitions of the sign bit. - * - * BookE has a level sensitive decrementer (latches in TSR) so we - * don't need that + /* + * Trigger the decrementer if we have a pending event. 
Some processors + * only trigger on edge transitions of the sign bit. We might also + * have disabled interrupts long enough that the decrementer wrapped + * to positive. */ - if ((int)mfspr(SPRN_DEC) < 0) - mtspr(SPRN_DEC, 1); -#endif /* CONFIG_BOOKE */ + decrementer_check_overflow(); /* * Force the delivery of pending soft-disabled interrupts on PS3. diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 583af70c4b1..cac9d2c4d0e 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -163,6 +163,8 @@ static int kexec_all_irq_disabled = 0; static void kexec_smp_down(void *arg) { local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure our irqs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; while(kexec_all_irq_disabled == 0) @@ -245,6 +247,8 @@ static void kexec_prepare_cpus(void) wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); + hard_irq_disable(); + mb(); /* make sure IRQs are disabled before we say they are */ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; @@ -282,6 +286,7 @@ static void kexec_prepare_cpus(void) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 0); local_irq_disable(); + hard_irq_disable(); } #endif /* SMP */ diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index f832773fc28..449a7e053e6 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -187,8 +187,8 @@ int apply_relocate(Elf32_Shdr *sechdrs, static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) { - if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) - && entry->jump[1] == 0x396b0000 + (val & 0xffff)) + if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16) + && entry->jump[1] == 0x398c0000 + (val & 0xffff)) return 1; return 0; } @@ -215,10 +215,9 @@ static uint32_t do_plt_call(void *location, entry++; } - /* Stolen from Paul Mackerras as well... 
*/ - entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ - entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ - entry->jump[2] = 0x7d6903a6; /* mtctr r11 */ + entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */ + entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/ + entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ entry->jump[3] = 0x4e800420; /* bctr */ DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 822f63008ae..5793c4ba5a0 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags) { unsigned long flags; s64 left; + unsigned long val; if (!event->hw.idx || !event->hw.sample_period) return; @@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags) event->hw.state = 0; left = local64_read(&event->hw.period_left); - write_pmc(event->hw.idx, left); + + val = 0; + if (left < 0x80000000L) + val = 0x80000000L - left; + + write_pmc(event->hw.idx, val); perf_event_update_userpage(event); perf_pmu_enable(event->pmu); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 91e52df3d81..5596397ce95 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -794,16 +794,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { - if (current->thread.dscr_inherit) { - p->thread.dscr_inherit = 1; - p->thread.dscr = current->thread.dscr; - } else if (0 != dscr_default) { - p->thread.dscr_inherit = 1; - p->thread.dscr = dscr_default; - } else { - p->thread.dscr_inherit = 0; - p->thread.dscr = 0; - } + p->thread.dscr_inherit = current->thread.dscr_inherit; + p->thread.dscr = current->thread.dscr; } #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cb22024f2b4..9321d0f4b6a 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -1497,9 +1497,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { tmp = ptrace_get_reg(child, (int) index); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - tmp = ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)]; + if (fpidx < (PT_FPSCR - PT_FPR0)) + tmp = ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH]; + else + tmp = child->thread.fpscr.val; } ret = put_user(tmp, datalp); break; @@ -1525,9 +1530,14 @@ long arch_ptrace(struct task_struct *child, long request, if (index < PT_FPR0) { ret = ptrace_put_reg(child, index, data); } else { + unsigned int fpidx = index - PT_FPR0; + flush_fp_to_thread(child); - ((unsigned long *)child->thread.fpr) - [TS_FPRWIDTH * (index - PT_FPR0)] = data; + if (fpidx < (PT_FPSCR - PT_FPR0)) + ((unsigned long *)child->thread.fpr) + [fpidx * TS_FPRWIDTH] = data; + else + child->thread.fpscr.val = data; ret = 0; } break; diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index f0f2199e64e..cd51a5c7137 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, 
dscr_default); + } +} + static ssize_t __used store_dscr_default(struct sysdev_class *class, struct sysdev_class_attribute *attr, const char *buf, size_t count) @@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 03b29a6759a..818d809e22f 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq); #ifdef CONFIG_VIRT_CPU_ACCOUNTING /* * Factors for converting from cputime_t (timebase ticks) to - * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). + * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). * These are all stored as 0.64 fixed-point binary fractions. */ u64 __cputime_jiffies_factor; EXPORT_SYMBOL(__cputime_jiffies_factor); -u64 __cputime_msec_factor; -EXPORT_SYMBOL(__cputime_msec_factor); +u64 __cputime_usec_factor; +EXPORT_SYMBOL(__cputime_usec_factor); u64 __cputime_sec_factor; EXPORT_SYMBOL(__cputime_sec_factor); u64 __cputime_clockt_factor; @@ -192,8 +192,8 @@ static void calc_cputime_factors(void) div128_by_32(HZ, 0, tb_ticks_per_sec, &res); __cputime_jiffies_factor = res.result_low; - div128_by_32(1000, 0, tb_ticks_per_sec, &res); - __cputime_msec_factor = res.result_low; + div128_by_32(1000000, 0, tb_ticks_per_sec, &res); + __cputime_usec_factor = res.result_low; div128_by_32(1, 0, tb_ticks_per_sec, &res); __cputime_sec_factor = res.result_low; div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); @@ -859,13 +859,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, void update_vsyscall_tz(void) { - /* Make userspace gettimeofday spin until we're done. 
*/ - ++vdso_data->tb_update_count; - smp_mb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; - smp_mb(); - ++vdso_data->tb_update_count; } static void __init clocksource_init(void) @@ -889,6 +884,15 @@ static void __init clocksource_init(void) clock->name, clock->mult, clock->shift); } +void decrementer_check_overflow(void) +{ + u64 now = get_tb_or_rtc(); + struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); + + if (now >= decrementer->next_tb) + set_dec(1); +} + static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1a0141426cd..6889f2676a9 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -935,8 +935,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 549bb2c9a47..ded8a1a0426 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -79,6 +79,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = 0; run->dcr.is_write = 0; + vcpu->arch.dcr_is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); @@ -100,6 +101,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, run->dcr.dcrn = dcrn; run->dcr.data = kvmppc_get_gpr(vcpu, rs); run->dcr.is_write = 1; + vcpu->arch.dcr_is_write = 1; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); emulated = EMULATE_DO_DCR; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 2c1ae7a5fb5..97042c66e11 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -221,7 +221,7 @@ int __node_distance(int a, int b) int distance = LOCAL_DISTANCE; if (!form1_affinity) - return distance; + return ((a == b) ? 
LOCAL_DISTANCE : REMOTE_DISTANCE); for (i = 0; i < distance_ref_points_depth; i++) { if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 856e9c39806..6786f9d581b 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -100,6 +100,7 @@ spufs_new_inode(struct super_block *sb, int mode) if (!inode) goto out; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 1b5dc1a2e14..daf793b1e94 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -85,9 +85,11 @@ void __init wii_memory_fixups(void) wii_hole_start = p[0].base + p[0].size; wii_hole_size = p[1].base - wii_hole_start; - pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size); + pr_info("MEM1: <%08llx %08llx>\n", + (unsigned long long) p[0].base, (unsigned long long) p[0].size); pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size); - pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size); + pr_info("MEM2: <%08llx %08llx>\n", + (unsigned long long) p[1].base, (unsigned long long) p[1].size); p[0].size += wii_hole_size + p[1].size; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index db092d7c4c5..53a6be7ebe3 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -414,7 +414,7 @@ static struct irqaction psurge_irqaction = { static void __init smp_psurge_setup_cpu(int cpu_nr) { - if (cpu_nr != 0) + if (cpu_nr != 0 || !psurge_start) return; /* reset the entry point so if we get another intr we won't diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index f106662f438..c9311cfdfca 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long if (opcode > MAX_HCALL_OPCODE) return; - h = &get_cpu_var(hcall_stats)[opcode / 4]; + h = &__get_cpu_var(hcall_stats)[opcode / 4]; h->tb_start = mftb(); h->purr_start = mfspr(SPRN_PURR); } @@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long h->num_calls++; h->tb_total += mftb() - h->tb_start; h->purr_total += mfspr(SPRN_PURR) - h->purr_start; - - put_cpu_var(hcall_stats); } static int __init hcall_inst_init(void) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index ed96b376537..2e0b2a76c00 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -377,7 +377,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. 
+ */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; @@ -745,6 +751,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) goto out; (*depth)++; + preempt_disable(); trace_hcall_entry(opcode, args); (*depth)--; @@ -767,6 +774,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval, (*depth)++; trace_hcall_exit(opcode, retval, retbuf); + preempt_enable(); (*depth)--; out: diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 42541bbcc7f..ace17844191 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -975,7 +975,7 @@ static int cpu_cmd(void) /* print cpus waiting or in xmon */ printf("cpus stopped:"); count = 0; - for (cpu = 0; cpu < NR_CPUS; ++cpu) { + for_each_possible_cpu(cpu) { if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { if (count == 0) printf(" %x", cpu); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c03fef7a9c2..c395f713ce3 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -89,7 +89,6 @@ config S390 select HAVE_GET_USER_PAGES_FAST select HAVE_ARCH_MUTEX_CPU_RELAX select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 - select HAVE_RCU_TABLE_FREE if SMP select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK @@ -228,6 +227,9 @@ config COMPAT config SYSVIPC_COMPAT def_bool y if COMPAT && SYSVIPC +config KEYS_COMPAT + def_bool y if COMPAT && KEYS + config AUDIT_ARCH def_bool y diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index da359ca6fe5..f7b74bcce10 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -172,13 +172,6 @@ static inline int is_compat_task(void) return is_32bit_task(); } -#else - -static inline int is_compat_task(void) -{ - return 0; -} - #endif static inline void __user *arch_compat_alloc_user_space(long len) diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 38e71ebcd3c..e4b6609fe92 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *); unsigned long *page_table_alloc(struct mm_struct *); void page_table_free(struct mm_struct *, unsigned long *); -#ifdef CONFIG_HAVE_RCU_TABLE_FREE void page_table_free_rcu(struct mmu_gather *, unsigned long *); -void __tlb_remove_table(void *_table); -#endif static inline void clear_table(unsigned long *s, unsigned long val, size_t n) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 801fbe1d837..4e152532754 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned long pfn) #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) +/* TODO: s390 cannot support io_remap_pfn_range... 
*/ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h index cdf5cb2fe03..c872626346e 100644 --- a/arch/s390/include/asm/signal.h +++ b/arch/s390/include/asm/signal.h @@ -131,6 +131,7 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 88829a40af6..a4b8f6009c2 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -126,4 +126,32 @@ static inline unsigned long long get_clock_monotonic(void) return get_clock_xt() - sched_clock_base_cc; } +/** + * tod_to_ns - convert a TOD format value to nanoseconds + * @todval: to be converted TOD format value + * Returns: number of nanoseconds that correspond to the TOD format value + * + * Converting a 64 Bit TOD format value to nanoseconds means that the value + * must be divided by 4.096. In order to achieve that we multiply with 125 + * and divide by 512: + * + * ns = (todval * 125) >> 9; + * + * In order to avoid an overflow with the multiplication we can rewrite this. + * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * we end up with + * + * ns = ((2^32 * th + tl) * 125 ) >> 9; + * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * + */ +static inline unsigned long long tod_to_ns(unsigned long long todval) +{ + unsigned long long ns; + + ns = ((todval >> 32) << 23) * 125; + ns += ((todval & 0xffffffff) * 125) >> 9; + return ns; +} + #endif diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index c687a2c8346..775a5eea8f9 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -30,14 +30,10 @@ struct mmu_gather { struct mm_struct *mm; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch *batch; -#endif unsigned int fullmm; - unsigned int need_flush; }; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch { struct rcu_head rcu; unsigned int nr; @@ -49,7 +45,6 @@ struct mmu_table_batch { extern void tlb_table_flush(struct mmu_gather *tlb); extern void tlb_remove_table(struct mmu_gather *tlb, void *table); -#endif static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, @@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, { tlb->mm = mm; tlb->fullmm = full_mm_flush; - tlb->need_flush = 0; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; -#endif if (tlb->fullmm) __tlb_flush_mm(mm); } static inline void tlb_flush_mmu(struct mmu_gather *tlb) { - if (!tlb->need_flush) - return; - tlb->need_flush = 0; - __tlb_flush_mm(tlb->mm); -#ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb_table_flush(tlb); -#endif } static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { - tlb_flush_mmu(tlb); + tlb_table_flush(tlb); } /* @@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long address) { -#ifdef CONFIG_HAVE_RCU_TABLE_FREE if (!tlb->fullmm) return page_table_free_rcu(tlb, (unsigned long *) pte); -#endif page_table_free(tlb->mm, (unsigned long *) pte); } @@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, #ifdef __s390x__ if (tlb->mm->context.asce_limit <= 
(1UL << 31)) return; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE if (!tlb->fullmm) return tlb_remove_table(tlb, pmd); -#endif crst_table_free(tlb->mm, (unsigned long *) pmd); #endif } @@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #ifdef __s390x__ if (tlb->mm->context.asce_limit <= (1UL << 42)) return; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE if (!tlb->fullmm) return tlb_remove_table(tlb, pud); -#endif crst_table_free(tlb->mm, (unsigned long *) pud); #endif } diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index b7a4f2eb005..d7862adc434 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -73,8 +73,6 @@ static inline void __tlb_flush_idte(unsigned long asce) static inline void __tlb_flush_mm(struct mm_struct * mm) { - if (unlikely(cpumask_empty(mm_cpumask(mm)))) - return; /* * If the machine has IDTE we prefer to do a per mm flush * on all cpus instead of doing a local flush if the mm diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 53acaa86dd9..f98af0309b0 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -631,7 +631,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } @@ -642,7 +641,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 541a7509fae..abdc2b1063e 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include "entry.h" diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 5804cfa7cba..5c55466e78e 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -20,8 +20,8 @@ #include #include #include +#include #include -#include #include #include #include diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 0c35dee10b0..0260051c08f 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index dff933065ab..943ea0e4b71 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return (get_clock_monotonic() * 125) >> 9; + return tod_to_ns(get_clock_monotonic()); } /* diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 35c21bf910c..a3db4c80e00 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -358,7 +358,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return 0; } - sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2ada634fc7c..f9804b7aec1 100644 --- a/arch/s390/kvm/kvm-s390.c +++ 
b/arch/s390/kvm/kvm-s390.c @@ -584,6 +584,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) } else prefix = 0; + /* + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy + * copying in vcpu load/put. Lets update our copies before we save + * it into the save area + */ + save_fp_regs(&vcpu->arch.guest_fpregs); + save_access_regs(vcpu->arch.guest_acrs); + if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fe103e891e7..6903d441068 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -36,7 +36,6 @@ #include #include #include -#include #include "../kernel/entry.h" #ifndef CONFIG_64BIT @@ -568,6 +567,7 @@ static void pfault_interrupt(unsigned int ext_int_code, tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); + put_task_struct(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial @@ -577,14 +577,22 @@ static void pfault_interrupt(unsigned int ext_int_code, put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ - if (tsk->thread.pfault_wait == -1) { + if (tsk->thread.pfault_wait == 1) { + /* Already on the list with a reference: put to sleep */ + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + set_tsk_need_resched(tsk); + } else if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion - * interrupt. Let the task sleep. */ + * interrupt. Let the task sleep. + * An extra task reference is needed since a different + * cpu may set the task state to TASK_RUNNING again + * before the scheduler is reached. 
*/ + get_task_struct(tsk); tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); set_task_state(tsk, TASK_UNINTERRUPTIBLE); @@ -609,6 +617,7 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self, list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); + put_task_struct(tsk); } spin_unlock_irq(&pfault_lock); break; diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 65cb06e2af4..4ccf9f54355 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (end < start) + if ((end < start) || (end > TASK_SIZE)) goto slow_irqon; /* diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index c9a9f7f1818..c0cf9ceb383 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -28,8 +28,8 @@ #include #include #include +#include #include -#include static unsigned long stack_maxrandom_size(void) { diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 458893f5f6b..51b80b9d1f6 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -243,8 +243,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) } } -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - static void __page_table_free_rcu(void *table, unsigned bit) { struct page *page; @@ -301,7 +299,66 @@ void __tlb_remove_table(void *_table) free_pages((unsigned long) table, ALLOC_ORDER); } -#endif +static void tlb_remove_table_smp_sync(void *arg) +{ + /* Simply deliver the interrupt */ +} + +static void tlb_remove_table_one(void *table) +{ + /* + * This isn't an RCU grace period and hence the page-tables cannot be + * assumed to be actually RCU-freed. + * + * It is however sufficient for software page-table walkers that rely + * on IRQ disabling. See the comment near struct mmu_table_batch. 
+ */ + smp_call_function(tlb_remove_table_smp_sync, NULL, 1); + __tlb_remove_table(table); +} + +static void tlb_remove_table_rcu(struct rcu_head *head) +{ + struct mmu_table_batch *batch; + int i; + + batch = container_of(head, struct mmu_table_batch, rcu); + + for (i = 0; i < batch->nr; i++) + __tlb_remove_table(batch->tables[i]); + + free_page((unsigned long)batch); +} + +void tlb_table_flush(struct mmu_gather *tlb) +{ + struct mmu_table_batch **batch = &tlb->batch; + + if (*batch) { + __tlb_flush_mm(tlb->mm); + call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); + *batch = NULL; + } +} + +void tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + struct mmu_table_batch **batch = &tlb->batch; + + if (*batch == NULL) { + *batch = (struct mmu_table_batch *) + __get_free_page(GFP_NOWAIT | __GFP_NOWARN); + if (*batch == NULL) { + __tlb_flush_mm(tlb->mm); + tlb_remove_table_one(table); + return; + } + (*batch)->nr = 0; + } + (*batch)->tables[(*batch)->nr++] = table; + if ((*batch)->nr == MAX_TABLE_BATCH) + tlb_table_flush(tlb); +} /* * switch on pgstes for its userspace process (for kvm) diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 577abba3fac..83bb96079c4 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -408,7 +408,7 @@ ENTRY(handle_sys) sw r9, [r0, PT_EPC] cmpi.c r27, __NR_syscalls # check syscall number - bgtu illegal_syscall + bgeu illegal_syscall slli r8, r27, 2 # get syscall routine la r11, sys_call_table diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index f38112be67d..978b7fd6d47 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -202,9 +202,9 @@ extern void __kernel_vsyscall; if (vdso_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ else \ - NEW_AUX_ENT(AT_IGNORE, 0); + NEW_AUX_ENT(AT_IGNORE, 0) #else -#define VSYSCALL_AUX_ENT +#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0) #endif /* CONFIG_VSYSCALL */ #ifdef CONFIG_SH_FPU diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 2e794193cd9..bc31e5ec1c2 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -31,6 +31,7 @@ config SPARC config SPARC32 def_bool !64BIT + select GENERIC_ATOMIC64 config SPARC64 def_bool 64BIT @@ -590,6 +591,9 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y +config KEYS_COMPAT + def_bool y if COMPAT && KEYS + endmenu source "net/Kconfig" diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index ad1fb5d969f..eddcfb36aaf 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -31,7 +31,7 @@ UTS_MACHINE := sparc #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 -KBUILD_AFLAGS += -m32 +KBUILD_AFLAGS += -m32 -Wa,-Av8 #LDFLAGS_vmlinux = -N -Ttext 0xf0004000 # Since 2.5.40, the first stage is left not btfix-ed. 
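The arch/score/kernel/entry.S hunk above changes the syscall-number guard from bgtu to bgeu: with a zero-based sys_call_table of __NR_syscalls entries, index __NR_syscalls itself is already one past the end, so the old strict greater-than test let exactly one invalid number through. Below is a minimal user-space C sketch of the same boundary check; the table size and entry names are invented for illustration and are not taken from the patch.

	#include <stdio.h>

	#define NR_SYSCALLS 3	/* stand-in for __NR_syscalls, purely illustrative */

	static const char *sys_call_table[NR_SYSCALLS] = {
		"sys_restart_syscall", "sys_exit", "sys_fork"	/* invented names */
	};

	/* Mirrors the fixed check: reject nr >= NR_SYSCALLS, not just nr > NR_SYSCALLS. */
	static const char *dispatch(unsigned int nr)
	{
		if (nr >= NR_SYSCALLS)	/* bgeu: nr == NR_SYSCALLS is invalid too */
			return "illegal_syscall";
		return sys_call_table[nr];
	}

	int main(void)
	{
		/* nr == NR_SYSCALLS would read one slot past the table with the old '>' test. */
		printf("%s\n", dispatch(NR_SYSCALLS));	/* prints "illegal_syscall" */
		printf("%s\n", dispatch(1));		/* prints "sys_exit" */
		return 0;
	}

Compiled with any C compiler, the first call prints illegal_syscall, which is exactly the value the old bgtu test would have dispatched out of bounds.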
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7ae128b19d3..98f223a102c 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -15,6 +15,8 @@ #ifdef __KERNEL__ +#include + #include #define ATOMIC_INIT(i) { (i) } diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 177061064ee..f368cef0379 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte) static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep_set_wrprotect(mm, addr, ptep); + pte_t old_pte = *ptep; + set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); } static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty) { - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + int changed = !pte_same(*ptep, pte); + if (changed) { + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); + flush_tlb_page(vma, addr); + } + return changed; } static inline pte_t huge_ptep_get(pte_t *ptep) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 98226280423..ba63d0888aa 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -774,6 +774,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include #include /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h index e49b828a247..492943173ab 100644 --- a/arch/sparc/include/asm/signal.h +++ b/arch/sparc/include/asm/signal.h @@ -191,6 +191,7 @@ struct __old_sigaction { unsigned long sa_flags; void (*sa_restorer)(void); /* not used by Linux/SPARC yet */ }; +#define __ARCH_HAS_SA_RESTORER typedef struct sigaltstack { void __user *ss_sp; diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 10bcabce97b..f856c7fddc2 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h @@ -140,8 +140,7 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. */ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef46349415..f0d6a9700f4 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. 
*/ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 7eef3f74196..f5ddc0bae38 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -268,4 +268,4 @@ static int __init sunfire_init(void) return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 7429b47c3ac..dcae702fc1f 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -1269,4 +1269,4 @@ static int __init ds_init(void) return vio_register_driver(&ds_driver); } -subsys_initcall(ds_init); +fs_initcall(ds_init); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6860d40253c..904ed639bc1 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -513,11 +513,13 @@ static u64 nop_for_index(int idx) static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { - u64 val, mask = mask_for_index(idx); + u64 enc, val, mask = mask_for_index(idx); + + enc = perf_event_get_enc(cpuc->events[idx]); val = cpuc->pcr; val &= ~mask; - val |= hwc->config; + val |= event_encoding(enc, idx); cpuc->pcr = val; pcr_ops->write(cpuc->pcr); @@ -1380,8 +1382,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] + STACK_BIAS; do { struct sparc_stackf *usf, sf; @@ -1402,8 +1402,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { struct sparc_stackf32 *usf, sf; @@ -1422,6 +1420,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 
{ + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 77f1b95e080..9171fc238de 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -20,11 +20,6 @@ .text .align 32 -__handle_softirq: - call do_softirq - nop - ba,a,pt %xcc, __handle_softirq_continue - nop __handle_preemption: call schedule wrpr %g0, RTRAP_PSTATE, %pstate @@ -89,9 +84,7 @@ rtrap: cmp %l1, 0 /* mm/ultra.S:xcall_report_regs KNOWS about this load. */ - bne,pn %icc, __handle_softirq ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 -__handle_softirq_continue: rtrap_xcall: sethi %hi(0xf << 20), %l4 and %l1, %l4, %l4 diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index d58260bff2d..77d476175dc 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -309,9 +309,7 @@ void do_rt_sigreturn(struct pt_regs *regs) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - - if (err) + if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) goto segv; err |= __get_user(rwin_save, &sf->rwin_save); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 99cb17251bb..e82c18efb06 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_receive_signal; @@ -1070,23 +1070,56 @@ void smp_flush_tlb_mm(struct mm_struct *mm) put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 908b47a5ee2..10c9b369169 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if 
(current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 1d7e274f3f2..7f5f65d0b3f 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -212,24 +212,20 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 - /* Check if force_successful_syscall_return() - * was invoked. - */ - ldub [%g6 + TI_SYS_NOERROR], %l2 - brnz,a,pn %l2, 80f - stb %g0, [%g6 + TI_SYS_NOERROR] - cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 -80: + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + +2: + stb %g0, [%g6 + TI_SYS_NOERROR] /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 +3: stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] bne,pn %icc, linux_syscall_trace2 add %l1, 0x4, %l2 ! npc = npc+4 @@ -238,20 +234,20 @@ ret_sys_call: stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 1: + /* Check if force_successful_syscall_return() + * was invoked. + */ + ldub [%g6 + TI_SYS_NOERROR], %l2 + brnz,pn %l2, 2b + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. */ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 - or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] - stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] - bne,pn %icc, linux_syscall_trace2 - add %l1, 0x4, %l2 ! 
npc = npc+4 - stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, 3b + or %g3, %g2, %g3 - b,pt %xcc, rtrap - stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] linux_syscall_trace2: call syscall_trace_leave add %sp, PTREGS_OFF, %o0 diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index f566518483b..248fb676336 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -74,7 +74,7 @@ sys_call_table32: .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid -/*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat +/*280*/ .word sys32_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 8e073d80213..6ff4d78d859 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index b1f279cd00b..afd021ed941 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void 
arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm) { @@ -90,6 +114,12 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, nr = 0; } + if (!tb->active) { + global_flush_tlb_page(mm, vaddr); + flush_tsb_user_page(mm, vaddr); + goto out; + } + if (nr == 0) tb->mm = mm; @@ -98,5 +128,6 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a5f51b22fcb..cb16ff33b84 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -8,11 +8,10 @@ #include #include #include -#include -#include -#include #include +#include #include +#include #include extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -47,23 +46,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -91,6 +94,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b57a5942ba6..dd10caac010 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -52,6 +52,33 @@ __flush_tlb_mm: /* 18 insns */ nop nop + .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + 
mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn %o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page +xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc %g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -495,11 +563,11 @@ xcall_fetch_glob_regs: stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... 
*/ - rdpr %cwp, %g2 - sub %g2, 1, %g7 + rdpr %cwp, %g3 + sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 - wrpr %g2, %cwp + wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 @@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 0249b8b4db5..532a2a42ab7 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -11,6 +11,7 @@ config TILE select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP select GENERIC_IRQ_SHOW + select HAVE_SYSCALL_WRAPPERS if TILEGX select SYS_HYPERVISOR # FIXME: investigate whether we need/want these options. diff --git a/arch/tile/Makefile b/arch/tile/Makefile index 17acce70569..04c637c4eb4 100644 --- a/arch/tile/Makefile +++ b/arch/tile/Makefile @@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH)) endif endif +# The tile compiler may emit .eh_frame information for backtracing. +# In kernel modules, this causes load failures due to unsupported relocations. 
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+
 ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
 KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
 endif
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa51fea..bd186c4eaa5 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@ static inline int ffs(int x)
 	return __builtin_ffs(x);
 }
 
+static inline int fls64(__u64 w)
+{
+	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
 /**
  * fls - find last set bit in word
  * @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
  */
 static inline int fls(int x)
 {
-	return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
-	return (sizeof(__u64) * 8) - __builtin_clzll(w);
+	return fls64((unsigned int) x);
 }
 
 static inline unsigned int __arch_hweight32(unsigned int w)
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index a7869ad6277..41459d80b6b 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -406,19 +406,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * Set up registers for signal handler.
 	 * Registers that we don't modify keep the value they had from
 	 * user-space at the time we took the signal.
+	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
 	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
 	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
 	regs->sp = ptr_to_compat_reg(frame);
 	regs->lr = restorer;
 	regs->regs[0] = (unsigned long) usig;
-
-	if (ka->sa.sa_flags & SA_SIGINFO) {
-		/* Need extra arguments, so mark to restore caller-saves. */
-		regs->regs[1] = ptr_to_compat_reg(&frame->info);
-		regs->regs[2] = ptr_to_compat_reg(&frame->uc);
-		regs->flags |= PT_FLAGS_CALLER_SAVES;
-	}
+	regs->regs[1] = ptr_to_compat_reg(&frame->info);
+	regs->regs[2] = ptr_to_compat_reg(&frame->uc);
+	regs->flags |= PT_FLAGS_CALLER_SAVES;
 
 	/*
 	 * Notify any tracer that was single-stepping it.
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 41474fb5eee..5888f1b8347 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -69,6 +69,8 @@ extern unsigned long end_iomem;
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
 
+#define io_remap_pfn_range remap_pfn_range
+
 /*
  * The i386 can't do page protection for execute, and considers that the same
  * are read.
@@ -271,6 +273,12 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 }
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+}
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
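The tile bitops hunk above now derives fls() from fls64(). A standalone userspace sketch of the same clz-based computation; the explicit zero guard is mine, since __builtin_clzll(0) is undefined in general and the example should not rely on it:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: find-last-set as "bit width minus leading zeros". */
static int fls64_demo(uint64_t w)
{
	return w ? 64 - __builtin_clzll(w) : 0;	/* zero guarded explicitly */
}

static int fls_demo(int x)
{
	return fls64_demo((unsigned int)x);	/* mirrors the new fls() */
}

int main(void)
{
	/* prints: 1 8 32 64 */
	printf("%d %d %d %d\n", fls_demo(1), fls_demo(0x80),
	       fls_demo(-1), fls64_demo(1ULL << 63));
	return 0;
}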
@@ -346,11 +354,11 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); #define update_mmu_cache(vma,address,ptep) do ; while (0) /* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 4) & 0x3f) +#define __swp_type(x) (((x).val >> 5) & 0x1f) #define __swp_offset(x) ((x).val >> 11) #define __swp_entry(type, offset) \ - ((swp_entry_t) { ((type) << 4) | ((offset) << 11) }) + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) }) #define __pte_to_swp_entry(pte) \ ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 37357a599dc..90bf3144a59 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1219,10 +1219,6 @@ config HAVE_ARCH_BOOTMEM def_bool y depends on X86_32 && NUMA -config HAVE_ARCH_ALLOC_REMAP - def_bool y - depends on X86_32 && NUMA - config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM @@ -1451,6 +1447,15 @@ config ARCH_USES_PG_UNCACHED def_bool y depends on X86_PAT +config ARCH_RANDOM + def_bool y + prompt "x86 architectural random number generator" if EXPERT + ---help--- + Enable the x86 architectural RDRAND instruction + (Intel Bull Mountain technology) to generate random numbers. + If supported, this is a high bandwidth, cryptographically + secure hardware random number generator. + config EFI bool "EFI runtime service support" depends on ACPI diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index be6d9e365a8..3470624d783 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec) pxor IN3, STATE4 movaps IN4, IV #else - pxor (INP), STATE2 - pxor 0x10(INP), STATE3 pxor IN1, STATE4 movaps IN2, IV + movups (INP), IN1 + pxor IN1, STATE2 + movups 0x10(INP), IN2 + pxor IN2, STATE3 #endif movups STATE1, (OUTP) movups STATE2, 0x10(OUTP) diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index c1870dddd32..26af1e31629 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -208,7 +208,7 @@ sysexit_from_sys_call: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) jnz ia32_ret_from_sys_call TRACE_IRQS_ON - sti + ENABLE_INTERRUPTS(CLBR_NONE) movl %eax,%esi /* second arg, syscall return value */ cmpl $0,%eax /* is it < 0? 
*/ setl %al /* 1 if so, 0 if not */ @@ -218,7 +218,7 @@ sysexit_from_sys_call: GET_THREAD_INFO(%r10) movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - cli + DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %edi,TI_flags(%r10) jz \exit diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 67f87f25761..78a1eff7422 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_AMD_NB_H #define _ASM_X86_AMD_NB_H +#include #include struct amd_nb_bus_dev_range { @@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[]; extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[]; extern bool early_is_amd_nb(u32 value); +extern struct resource *amd_get_mmconfig_range(struct resource *res); extern int amd_cache_northbridges(void); extern void amd_flush_garts(void); extern int amd_numa_init(void); diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h new file mode 100644 index 00000000000..0d9ec770f2f --- /dev/null +++ b/arch/x86/include/asm/archrandom.h @@ -0,0 +1,75 @@ +/* + * This file is part of the Linux kernel. + * + * Copyright (c) 2011, Intel Corporation + * Authors: Fenghua Yu , + * H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ */
+
+#ifndef ASM_X86_ARCHRANDOM_H
+#define ASM_X86_ARCHRANDOM_H
+
+#include
+#include
+#include
+#include
+
+#define RDRAND_RETRY_LOOPS	10
+
+#define RDRAND_INT	".byte 0x0f,0xc7,0xf0"
+#ifdef CONFIG_X86_64
+# define RDRAND_LONG	".byte 0x48,0x0f,0xc7,0xf0"
+#else
+# define RDRAND_LONG	RDRAND_INT
+#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#define GET_RANDOM(name, type, rdrand, nop)			\
+static inline int name(type *v)				\
+{								\
+	int ok;							\
+	alternative_io("movl $0, %0\n\t"			\
+		       nop,					\
+		       "\n1: " rdrand "\n\t"			\
+		       "jc 2f\n\t"				\
+		       "decl %0\n\t"				\
+		       "jnz 1b\n\t"				\
+		       "2:",					\
+		       X86_FEATURE_RDRAND,			\
+		       ASM_OUTPUT2("=r" (ok), "=a" (*v)),	\
+		       "0" (RDRAND_RETRY_LOOPS));		\
+	return ok;						\
+}
+
+#ifdef CONFIG_X86_64
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
+
+#else
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+extern void x86_init_rdrand(struct cpuinfo_x86 *c);
+
+#endif /* ASM_X86_ARCHRANDOM_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 71cc3800712..c5d941f08ba 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -173,7 +173,7 @@
 #define X86_FEATURE_XSAVEOPT	(7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN		(7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS		(7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW	(8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index c9e09ea0564..a850b4d8d14 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
-extern void __math_state_restore(void);
+extern void __math_state_restore(struct task_struct *);
+extern void math_state_restore(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
 extern user_regset_active_fn fpregs_active, xfpregs_active;
@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
 
 #endif	/* CONFIG_X86_64 */
 
-/* We need a safe address that is cheap to find and that is already
-   in L1 during context switch. The best choices are unfortunately
-   different for UP and SMP */
-#ifdef CONFIG_SMP
-#define safe_address (__per_cpu_offset[0])
-#else
-#define safe_address (kstat_cpu(0).cpustat.user)
-#endif
-
 /*
- * These must be called with preempt disabled
+ * These must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact.
  */
-static inline void fpu_save_init(struct fpu *fpu)
+static inline int fpu_save_init(struct fpu *fpu)
 {
 	if (use_xsave()) {
 		fpu_xsave(fpu);
@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
 		 * xsave header may indicate the init state of the FP.
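The GET_RANDOM() macro above emits the RDRAND opcode through the alternatives machinery and retries up to RDRAND_RETRY_LOOPS times, because the instruction can transiently report "no data ready" (CF clear). A plain-C sketch of the same retry contract; rdrand_step() here is a simulated stand-in so the example runs in userspace, not the kernel's implementation:

#include <stdio.h>
#include <stdlib.h>

#define RDRAND_RETRY_LOOPS 10

/* Stand-in for the RDRAND instruction: returns 1 and fills *v on
 * success, 0 when no data is ready (the CF=0 case).  Simulated here. */
static int rdrand_step(unsigned long *v)
{
	static int not_ready = 2;	/* pretend the DRNG stalls twice */

	if (not_ready-- > 0)
		return 0;
	*v = (unsigned long)random();
	return 1;
}

static int arch_get_random_long_sketch(unsigned long *v)
{
	int i;

	for (i = 0; i < RDRAND_RETRY_LOOPS; i++)
		if (rdrand_step(v))
			return 1;	/* got a value */
	return 0;			/* caller must fall back to other entropy */
}

int main(void)
{
	unsigned long v;

	if (arch_get_random_long_sketch(&v))
		printf("got 0x%lx after retries\n", v);
	return 0;
}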
*/ if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP)) - return; + return 1; } else if (use_fxsr()) { fpu_fxsave(fpu); } else { asm volatile("fnsave %[fx]; fwait" : [fx] "=m" (fpu->state->fsave)); - return; + return 0; } - if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) + /* + * If exceptions are pending, we need to clear them so + * that we don't randomly get exceptions later. + * + * FIXME! Is this perhaps only true for the old-style + * irq13 case? Maybe we could leave the x87 state + * intact otherwise? + */ + if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) { asm volatile("fnclex"); - - /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception - is pending. Clear the x87 state here by setting it to fixed - values. safe_address is a random variable that should be in L1 */ - alternative_input( - ASM_NOP8 ASM_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %P[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (safe_address)); + return 0; + } + return 1; } -static inline void __save_init_fpu(struct task_struct *tsk) +static inline int __save_init_fpu(struct task_struct *tsk) { - fpu_save_init(&tsk->thread.fpu); - task_thread_info(tsk)->status &= ~TS_USEDFPU; + return fpu_save_init(&tsk->thread.fpu); } static inline int fpu_fxrstor_checking(struct fpu *fpu) @@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk) } /* - * Signal frame handlers... + * Software FPU state helpers. Careful: these need to + * be preemption protection *and* they need to be + * properly paired with the CR0.TS changes! */ -extern int save_i387_xstate(void __user *buf); -extern int restore_i387_xstate(void __user *buf); +static inline int __thread_has_fpu(struct task_struct *tsk) +{ + return tsk->thread.has_fpu; +} -static inline void __unlazy_fpu(struct task_struct *tsk) +/* Must be paired with an 'stts' after! */ +static inline void __thread_clear_has_fpu(struct task_struct *tsk) { - if (task_thread_info(tsk)->status & TS_USEDFPU) { - __save_init_fpu(tsk); - stts(); - } else - tsk->fpu_counter = 0; + tsk->thread.has_fpu = 0; +} + +/* Must be paired with a 'clts' before! */ +static inline void __thread_set_has_fpu(struct task_struct *tsk) +{ + tsk->thread.has_fpu = 1; } +/* + * Encapsulate the CR0.TS handling together with the + * software flag. + * + * These generally need preemption protection to work, + * do try to avoid using these on their own. + */ +static inline void __thread_fpu_end(struct task_struct *tsk) +{ + __thread_clear_has_fpu(tsk); + stts(); +} + +static inline void __thread_fpu_begin(struct task_struct *tsk) +{ + clts(); + __thread_set_has_fpu(tsk); +} + +/* + * FPU state switching for scheduling. + * + * This is a two-stage process: + * + * - switch_fpu_prepare() saves the old state and + * sets the new state of the CR0.TS bit. This is + * done within the context of the old process. + * + * - switch_fpu_finish() restores the new state as + * necessary. + */ +typedef struct { int preload; } fpu_switch_t; + +/* + * FIXME! We could do a totally lazy restore, but we need to + * add a per-cpu "this was the task that last touched the FPU + * on this CPU" variable, and the task needs to have a "I last + * touched the FPU on this CPU" and check them. + * + * We don't do that yet, so "fpu_lazy_restore()" always returns + * false, but some day.. 
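The helpers above tie the new per-task FPU ownership flag to CR0.TS, and the comments stress that the pair must only change together with preemption disabled. A hedged sketch of the intended calling pattern; the function name and the failure policy are illustrative, not from the patch:

/* Sketch: hand the FPU to 'tsk' and load its saved register state.
 * Ownership flag and CR0.TS are flipped together, preemption off. */
static void take_fpu_ownership(struct task_struct *tsk)
{
	preempt_disable();
	__thread_fpu_begin(tsk);	/* clts() + tsk->thread.has_fpu = 1 */
	if (restore_fpu_checking(tsk))	/* restore failed: give it back up */
		__thread_fpu_end(tsk);	/* has_fpu = 0 + stts() */
	preempt_enable();
}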
+ */ +#define fpu_lazy_restore(tsk) (0) +#define fpu_lazy_state_intact(tsk) do { } while (0) + +static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new) +{ + fpu_switch_t fpu; + + fpu.preload = tsk_used_math(new) && new->fpu_counter > 5; + if (__thread_has_fpu(old)) { + if (__save_init_fpu(old)) + fpu_lazy_state_intact(old); + __thread_clear_has_fpu(old); + old->fpu_counter++; + + /* Don't change CR0.TS if we just switch! */ + if (fpu.preload) { + __thread_set_has_fpu(new); + prefetch(new->thread.fpu.state); + } else + stts(); + } else { + old->fpu_counter = 0; + if (fpu.preload) { + if (fpu_lazy_restore(new)) + fpu.preload = 0; + else + prefetch(new->thread.fpu.state); + __thread_fpu_begin(new); + } + } + return fpu; +} + +/* + * By the time this gets called, we've already cleared CR0.TS and + * given the process the FPU if we are going to preload the FPU + * state - all we need to do is to conditionally restore the register + * state itself. + */ +static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu) +{ + if (fpu.preload) + __math_state_restore(new); +} + +/* + * Signal frame handlers... + */ +extern int save_i387_xstate(void __user *buf); +extern int restore_i387_xstate(void __user *buf); + static inline void __clear_fpu(struct task_struct *tsk) { - if (task_thread_info(tsk)->status & TS_USEDFPU) { + if (__thread_has_fpu(tsk)) { /* Ignore delayed exceptions from user space */ asm volatile("1: fwait\n" "2:\n" _ASM_EXTABLE(1b, 2b)); - task_thread_info(tsk)->status &= ~TS_USEDFPU; - stts(); + __thread_fpu_end(tsk); } } +/* + * Were we in an interrupt that interrupted kernel mode? + * + * We can do a kernel_fpu_begin/end() pair *ONLY* if that + * pair does nothing at all: the thread must not have fpu (so + * that we don't try to save the FPU state), and TS must + * be set (so that the clts/stts pair does nothing that is + * visible in the interrupted kernel thread). + */ +static inline bool interrupted_kernel_fpu_idle(void) +{ + return !__thread_has_fpu(current) && + (read_cr0() & X86_CR0_TS); +} + +/* + * Were we in user mode (or vm86 mode) when we were + * interrupted? + * + * Doing kernel_fpu_begin/end() is ok if we are running + * in an interrupt context from user mode - we'll just + * save the FPU state as required. + */ +static inline bool interrupted_user_mode(void) +{ + struct pt_regs *regs = get_irq_regs(); + return regs && user_mode_vm(regs); +} + +/* + * Can we use the FPU in kernel mode with the + * whole "kernel_fpu_begin/end()" sequence? + * + * It's always ok in process context (ie "not interrupt") + * but it is sometimes ok even from an irq. 
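switch_fpu_prepare()/switch_fpu_finish() above split the FPU hand-over around the actual stack switch. A minimal sketch of how a context-switch path would use the pair; __switch_stacks() is a placeholder for the architecture-specific part, not a real kernel function:

/* Sketch of the two-stage pattern described in the comment above. */
struct task_struct *context_switch_sketch(struct task_struct *prev,
					  struct task_struct *next)
{
	fpu_switch_t fpu;

	/* Stage 1: still running as 'prev'.  Save its FPU state and
	 * decide whether 'next' should have the registers preloaded. */
	fpu = switch_fpu_prepare(prev, next);

	__switch_stacks(prev, next);	/* placeholder for the real switch */

	/* Stage 2: now running as 'next'.  Restore its register state
	 * only if it was marked for preload. */
	switch_fpu_finish(next, fpu);
	return prev;
}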
+ */ +static inline bool irq_fpu_usable(void) +{ + return !in_interrupt() || + interrupted_user_mode() || + interrupted_kernel_fpu_idle(); +} + static inline void kernel_fpu_begin(void) { - struct thread_info *me = current_thread_info(); + struct task_struct *me = current; + + WARN_ON_ONCE(!irq_fpu_usable()); preempt_disable(); - if (me->status & TS_USEDFPU) - __save_init_fpu(me->task); - else + if (__thread_has_fpu(me)) { + __save_init_fpu(me); + __thread_clear_has_fpu(me); + /* We do 'stts()' in kernel_fpu_end() */ + } else clts(); } @@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void) preempt_enable(); } -static inline bool irq_fpu_usable(void) -{ - struct pt_regs *regs; - - return !in_interrupt() || !(regs = get_irq_regs()) || \ - user_mode(regs) || (read_cr0() & X86_CR0_TS); -} - /* * Some instructions like VIA's padlock instructions generate a spurious * DNA fault but don't modify SSE registers. And these instructions @@ -362,21 +492,65 @@ static inline void irq_ts_restore(int TS_state) stts(); } +/* + * The question "does this thread have fpu access?" + * is slightly racy, since preemption could come in + * and revoke it immediately after the test. + * + * However, even in that very unlikely scenario, + * we can just assume we have FPU access - typically + * to save the FP state - we'll just take a #NM + * fault and get the FPU access back. + * + * The actual user_fpu_begin/end() functions + * need to be preemption-safe, though. + * + * NOTE! user_fpu_end() must be used only after you + * have saved the FP state, and user_fpu_begin() must + * be used only immediately before restoring it. + * These functions do not do any save/restore on + * their own. + */ +static inline int user_has_fpu(void) +{ + return __thread_has_fpu(current); +} + +static inline void user_fpu_end(void) +{ + preempt_disable(); + __thread_fpu_end(current); + preempt_enable(); +} + +static inline void user_fpu_begin(void) +{ + preempt_disable(); + if (!user_has_fpu()) + __thread_fpu_begin(current); + preempt_enable(); +} + /* * These disable preemption on their own and are safe */ static inline void save_init_fpu(struct task_struct *tsk) { + WARN_ON_ONCE(!__thread_has_fpu(tsk)); preempt_disable(); __save_init_fpu(tsk); - stts(); + __thread_fpu_end(tsk); preempt_enable(); } static inline void unlazy_fpu(struct task_struct *tsk) { preempt_disable(); - __unlazy_fpu(tsk); + if (__thread_has_fpu(tsk)) { + __save_init_fpu(tsk); + __thread_fpu_end(tsk); + } else + tsk->fpu_counter = 0; preempt_enable(); } diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 0049211959c..0ab6a4dcb91 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -189,6 +189,9 @@ struct x86_emulate_ops { int (*intercept)(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage); + + bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -298,6 +301,19 @@ struct x86_emulate_ctxt { #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ X86EMUL_MODE_PROT64) +/* CPUID vendors */ +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41 +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 
0x74656273 + +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e +#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 + enum x86_intercept_stage { X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ X86_ICPT_PRE_EXCEPT, diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d2ac8e2ee89..1eb45de173c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -391,8 +391,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - unsigned int time_offset; - struct page *time_page; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; u64 last_guest_tsc; u64 last_kernel_ns; u64 last_tsc_nsec; diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index ffa037f28d3..a6a64141a8e 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -14,12 +14,6 @@ extern struct pglist_data *node_data[]; #include -extern void resume_map_numa_kva(pgd_t *pgd); - -#else /* !CONFIG_NUMA */ - -static inline void resume_map_numa_kva(pgd_t *pgd) {} - #endif /* CONFIG_NUMA */ #ifdef CONFIG_DISCONTIGMEM diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ebbc4d8ab17..2fdfe312b5d 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,7 +731,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 82885099c86..4b67ec957e8 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -85,6 +85,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -673,6 +674,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index effff47a3c8..43876f16caf 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte) ptep->pte_low = pte.pte_low; } +#define pmd_read_atomic pmd_read_atomic +/* + * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with + * a "*pmdp" dereference done by gcc. Problem is, in certain places + * where pte_offset_map_lock is called, concurrent page faults are + * allowed, if the mmap_sem is hold for reading. An example is mincore + * vs page faults vs MADV_DONTNEED. On the page fault side + * pmd_populate rightfully does a set_64bit, but if we're reading the + * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen + * because gcc will not read the 64bit of the pmd atomically. 
To fix + * this all places running pmd_offset_map_lock() while holding the + * mmap_sem in read mode, shall read the pmdp pointer using this + * function to know if the pmd is null nor not, and in turn to know if + * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd + * operations. + * + * Without THP if the mmap_sem is hold for reading, the + * pmd can only transition from null to not null while pmd_read_atomic runs. + * So there's no need of literally reading it atomically. + * + * With THP if the mmap_sem is hold for reading, the pmd can become + * THP or null or point to a pte (and in turn become "stable") at any + * time under pmd_read_atomic, so it's mandatory to read it atomically + * with cmpxchg8b. + */ +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + pmdval_t ret; + u32 *tmp = (u32 *)pmdp; + + ret = (pmdval_t) (*tmp); + if (ret) { + /* + * If the low part is null, we must not read the high part + * or we can end up with a partial pmd. + */ + smp_rmb(); + ret |= ((pmdval_t)*(tmp + 1)) << 32; + } + + return (pmd_t) { ret }; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + return (pmd_t) { atomic64_read((atomic64_t *)pmdp) }; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) { set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 18601c86fab..6be990922d4 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -142,12 +142,16 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; } +static inline unsigned long pud_pfn(pud_t pud) +{ + return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; +} + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) static inline int pmd_large(pmd_t pte) { - return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == - (_PAGE_PSE | _PAGE_PRESENT); + return pmd_flags(pte) & _PAGE_PSE; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -415,7 +419,13 @@ static inline int pte_hidden(pte_t pte) static inline int pmd_present(pmd_t pmd) { - return pmd_flags(pmd) & _PAGE_PRESENT; + /* + * Checking for _PAGE_PSE is needed too because + * split_huge_page will temporarily clear the present bit (but + * the _PAGE_PSE flag will remain set at all times while the + * _PAGE_PRESENT bit is clear). 
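The comment above explains why a raw *pmdp dereference is unsafe on 32-bit PAE when only mmap_sem is held for reading. A hedged sketch of a mincore-style walker using the new helper; apart from pmd_read_atomic(), pmd_none(), pmd_trans_huge() and barrier(), the names here are illustrative assumptions:

/* Sketch: lookup that must not tear the 64-bit pmd read. */
static int probe_address(struct mm_struct *mm, pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = pmd_read_atomic(pmdp);	/* low word first, then high */

	barrier();	/* keep the compiler from re-reading *pmdp later */

	if (pmd_none(pmd))
		return 0;			/* nothing mapped here */
	if (pmd_trans_huge(pmd))
		return probe_huge(pmd, addr);	/* assumed helper */
	return probe_ptes(mm, pmd, addr);	/* assumed helper */
}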
+ */ + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); } static inline int pmd_none(pmd_t pmd) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 219371546af..e5f724834ed 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -99,7 +99,6 @@ struct cpuinfo_x86 { u16 apicid; u16 initial_apicid; u16 x86_clflush_size; -#ifdef CONFIG_SMP /* number of cores as seen by the OS: */ u16 booted_cores; /* Physical processor id: */ @@ -110,7 +109,6 @@ struct cpuinfo_x86 { u8 compute_unit_id; /* Index into per_cpu list: */ u16 cpu_index; -#endif } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define X86_VENDOR_INTEL 0 @@ -454,6 +452,7 @@ struct thread_struct { unsigned long trap_no; unsigned long error_code; /* floating point and extended processor state */ + unsigned long has_fpu; struct fpu fpu; #ifdef CONFIG_X86_32 /* Virtual 86 mode info */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 94e7618fcac..f332d64cdd5 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -187,21 +187,14 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } -/* - * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode - * when it traps. The previous stack will be directly underneath the saved - * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. - * - * This is valid only for kernel mode traps. - */ -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ #ifdef CONFIG_X86_32 - return (unsigned long)(®s->sp); +extern unsigned long kernel_stack_pointer(struct pt_regs *regs); #else +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ return regs->sp; -#endif } +#endif #define GET_IP(regs) ((regs)->ip) #define GET_FP(regs) ((regs)->bp) diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 598457cbd0f..6cbc795b7c5 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -125,6 +125,8 @@ typedef unsigned long sigset_t; extern void do_notify_resume(struct pt_regs *, void *, __u32); # endif /* __KERNEL__ */ +#define __ARCH_HAS_SA_RESTORER + #ifdef __i386__ # ifdef __KERNEL__ struct old_sigaction { diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index c2ff2a1d845..f0d89d9ba57 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -93,10 +93,6 @@ do { \ "memory"); \ } while (0) -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT #else /* frame pointer must be last for get_wchan */ @@ -392,9 +388,6 @@ static inline void clflush(volatile void *__p) #define nop() asm volatile ("nop") -void disable_hlt(void); -void enable_hlt(void); - void cpu_idle_wait(void); extern unsigned long arch_align_stack(unsigned long sp); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 1f2e61e2898..278d3d5f906 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -242,8 +242,6 @@ static inline struct thread_info *current_thread_info(void) * ever touches our thread-synchronous status, so we don't * have to worry about atomic accesses. 
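The pmd_present() change in the pgtable.h hunk above widens the test so that a huge pmd which split_huge_page() has temporarily marked non-present (it keeps _PAGE_PSE set) still counts as present. A standalone illustration with the flag values written out; the bit positions mirror the usual x86 definitions but are only for demonstration:

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_PSE	0x080UL
#define _PAGE_PROTNONE	0x100UL

static int pmd_present_demo(unsigned long pmd_flags)
{
	return !!(pmd_flags & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE));
}

int main(void)
{
	/* prints: 1 1 1 0 */
	printf("%d %d %d %d\n",
	       pmd_present_demo(_PAGE_PRESENT),		/* normal mapping */
	       pmd_present_demo(_PAGE_PSE),		/* mid-split huge page */
	       pmd_present_demo(_PAGE_PROTNONE),	/* PROT_NONE mapping */
	       pmd_present_demo(0));			/* truly not present */
	return 0;
}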
*/ -#define TS_USEDFPU 0x0001 /* FPU was used by this task - this quantum (SMP) */ #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ #define TS_POLLING 0x0004 /* idle task polling need_resched, skip sending interrupt */ diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index 431793e5d48..34baa0eb5d0 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h @@ -57,14 +57,10 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); static inline unsigned long long __cycles_2_ns(unsigned long long cyc) { - unsigned long long quot; - unsigned long long rem; int cpu = smp_processor_id(); unsigned long long ns = per_cpu(cyc2ns_offset, cpu); - quot = (cyc >> CYC2NS_SCALE_FACTOR); - rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1); - ns += quot * per_cpu(cyc2ns, cpu) + - ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR); + ns += mult_frac(cyc, per_cpu(cyc2ns, cpu), + (1UL << CYC2NS_SCALE_FACTOR)); return ns; } diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 0310da67307..1d4490379b5 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -1,6 +1,7 @@ #ifndef _ASM_X86_TRAPS_H #define _ASM_X86_TRAPS_H +#include #include #include /* TRAP_TRACE, ... */ @@ -87,4 +88,29 @@ asmlinkage void smp_thermal_interrupt(void); asmlinkage void mce_threshold_interrupt(void); #endif +/* Interrupts/Exceptions */ +enum { + X86_TRAP_DE = 0, /* 0, Divide-by-zero */ + X86_TRAP_DB, /* 1, Debug */ + X86_TRAP_NMI, /* 2, Non-maskable Interrupt */ + X86_TRAP_BP, /* 3, Breakpoint */ + X86_TRAP_OF, /* 4, Overflow */ + X86_TRAP_BR, /* 5, Bound Range Exceeded */ + X86_TRAP_UD, /* 6, Invalid Opcode */ + X86_TRAP_NM, /* 7, Device Not Available */ + X86_TRAP_DF, /* 8, Double Fault */ + X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */ + X86_TRAP_TS, /* 10, Invalid TSS */ + X86_TRAP_NP, /* 11, Segment Not Present */ + X86_TRAP_SS, /* 12, Stack Segment Fault */ + X86_TRAP_GP, /* 13, General Protection Fault */ + X86_TRAP_PF, /* 14, Page Fault */ + X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */ + X86_TRAP_MF, /* 16, x87 Floating-Point Exception */ + X86_TRAP_AC, /* 17, Alignment Check */ + X86_TRAP_MC, /* 18, Machine Check */ + X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */ + X86_TRAP_IRET = 32, /* 32, IRET Exception */ +}; + #endif /* _ASM_X86_TRAPS_H */ diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 54a13aaebc4..21f7385badb 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h @@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa) /* UV global physical address --> socket phys RAM */ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa) { - unsigned long paddr = gpa & uv_hub_info->gpa_mask; + unsigned long paddr; unsigned long remap_base = uv_hub_info->lowmem_remap_base; unsigned long remap_top = uv_hub_info->lowmem_remap_top; gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) | ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val); - gpa = gpa & uv_hub_info->gpa_mask; + paddr = gpa & uv_hub_info->gpa_mask; if (paddr >= remap_base && paddr < remap_base + remap_top) paddr -= remap_base; return paddr; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4558f0d0822..479d03c9c4c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, return 0; } - if (intsrc->source_irq == 0 && intsrc->global_irq 
== 2) { + if (intsrc->source_irq == 0) { if (acpi_skip_timer_override) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + printk(PREFIX "BIOS IRQ0 override ignored.\n"); return 0; } - if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + + if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity + && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); } @@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d) } /* - * Force ignoring BIOS IRQ0 pin2 override + * Force ignoring BIOS IRQ0 override */ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) { - /* - * The ati_ixp4x0_rev() early PCI quirk should have set - * the acpi_skip_timer_override flag already: - */ if (!acpi_skip_timer_override) { - WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n"); - pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n", + pr_notice("%s detected: Ignoring BIOS IRQ0 override\n", d->ident); acpi_skip_timer_override = 1; } @@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { * is enabled. This input is incorrectly designated the * ISA IRQ 0 via an interrupt source override even though * it is wired to the output of the master 8259A and INTIN0 - * is not connected at all. Force ignoring BIOS IRQ0 pin2 + * is not connected at all. Force ignoring BIOS IRQ0 * override in that cases. */ { @@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), }, }, + { + .callback = dmi_ignore_irq0_timer_override, + .ident = "FUJITSU SIEMENS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), + }, + }, {} }; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index a81f2d52f86..4c734e6e587 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -161,7 +161,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = #endif #ifdef P6_NOP1 -static const unsigned char __initconst_or_module p6nops[] = +static const unsigned char p6nops[] = { P6_NOP1, P6_NOP2, @@ -220,7 +220,7 @@ void __init arch_init_ideal_nops(void) ideal_nops = intel_nops; #endif } - + break; default: #ifdef CONFIG_X86_64 ideal_nops = k8_nops; diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index bfd75ff5d5b..d9302b71066 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -53,6 +53,8 @@ static struct protection_domain *pt_domain; static struct iommu_ops amd_iommu_ops; +static struct dma_map_ops amd_iommu_dma_ops; + /* * general struct to manage commands send to an IOMMU */ @@ -1778,18 +1780,20 @@ static int device_change_notifier(struct notifier_block *nb, domain = domain_for_device(dev); - /* allocate a protection domain if a device is added */ dma_domain = find_protection_domain(devid); - if (dma_domain) - goto out; - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) - goto out; - dma_domain->target_dev = devid; + if (!dma_domain) { + /* allocate a protection domain if a device is added */ + dma_domain = dma_ops_domain_alloc(); + if (!dma_domain) + goto out; + dma_domain->target_dev = devid; + + spin_lock_irqsave(&iommu_pd_list_lock, flags); + list_add_tail(&dma_domain->list, &iommu_pd_list); + spin_unlock_irqrestore(&iommu_pd_list_lock, flags); + } - 
spin_lock_irqsave(&iommu_pd_list_lock, flags); - list_add_tail(&dma_domain->list, &iommu_pd_list); - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); + dev->archdata.dma_ops = &amd_iommu_dma_ops; break; case BUS_NOTIFY_DEL_DEVICE: diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index bfc8453bd98..d86aa3f1504 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -1031,8 +1031,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu) { int r; - if (pci_enable_msi(iommu->dev)) - return 1; + r = pci_enable_msi(iommu->dev); + if (r) + return r; r = request_threaded_irq(iommu->dev->irq, amd_iommu_int_handler, @@ -1042,24 +1043,33 @@ static int iommu_setup_msi(struct amd_iommu *iommu) if (r) { pci_disable_msi(iommu->dev); - return 1; + return r; } iommu->int_enabled = true; - iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); return 0; } static int iommu_init_msi(struct amd_iommu *iommu) { + int ret; + if (iommu->int_enabled) - return 0; + goto enable_faults; if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) - return iommu_setup_msi(iommu); + ret = iommu_setup_msi(iommu); + else + ret = -ENODEV; - return 1; + if (ret) + return ret; + +enable_faults: + iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); + + return 0; } /**************************************************************************** @@ -1353,6 +1363,7 @@ static struct syscore_ops amd_iommu_syscore_ops = { */ static int __init amd_iommu_init(void) { + struct amd_iommu *iommu; int i, ret = 0; /* @@ -1401,9 +1412,6 @@ static int __init amd_iommu_init(void) if (amd_iommu_pd_alloc_bitmap == NULL) goto free; - /* init the device table */ - init_device_table(); - /* * let all alias entries point to itself */ @@ -1453,6 +1461,12 @@ static int __init amd_iommu_init(void) if (ret) goto free_disable; + /* init the device table */ + init_device_table(); + + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); + amd_iommu_init_api(); amd_iommu_init_notifier(); diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 4c39baa8fac..be16854591c 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -119,20 +119,49 @@ bool __init early_is_amd_nb(u32 device) return false; } +struct resource *amd_get_mmconfig_range(struct resource *res) +{ + u32 address; + u64 base, msr; + unsigned segn_busn_bits; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return NULL; + + /* assume all cpus from fam10h have mmconfig */ + if (boot_cpu_data.x86 < 0x10) + return NULL; + + address = MSR_FAM10H_MMIO_CONF_BASE; + rdmsrl(address, msr); + + /* mmconfig is not enabled */ + if (!(msr & FAM10H_MMIO_CONF_ENABLE)) + return NULL; + + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & + FAM10H_MMIO_CONF_BUSRANGE_MASK; + + res->flags = IORESOURCE_MEM; + res->start = base; + res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1; + return res; +} + int amd_get_subcaches(int cpu) { struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link; unsigned int mask; - int cuid = 0; + int cuid; if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return 0; pci_read_config_dword(link, 0x1d4, &mask); -#ifdef CONFIG_SMP cuid = cpu_data(cpu).compute_unit_id; -#endif return (mask >> (4 * cuid)) & 0xf; } @@ -141,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask) static unsigned int reset, ban; struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu)); unsigned int reg; - int cuid = 0; + int cuid; if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) 
return -EINVAL; @@ -159,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask) pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000); } -#ifdef CONFIG_SMP cuid = cpu_data(cpu).compute_unit_id; -#endif mask <<= 4 * cuid; mask |= (0xf ^ (1 << cuid)) << 26; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index b9338b8cf42..14716956927 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1558,9 +1558,11 @@ static int __init apic_verify(void) mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; /* The BIOS may have set up the APIC at some other address */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (l & MSR_IA32_APICBASE_ENABLE) - mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (l & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + } pr_info("Found and enabled local APIC!\n"); return 0; @@ -1578,13 +1580,15 @@ int __init apic_force_enable(unsigned long addr) * MSR. This can only be done in software for Intel P6 or later * and AMD K7 (Model > 1) or later. */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (!(l & MSR_IA32_APICBASE_ENABLE)) { - pr_info("Local APIC disabled by BIOS -- reenabling.\n"); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | addr; - wrmsr(MSR_IA32_APICBASE, l, h); - enabled_via_apicbase = 1; + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (!(l & MSR_IA32_APICBASE_ENABLE)) { + pr_info("Local APIC disabled by BIOS -- reenabling.\n"); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | addr; + wrmsr(MSR_IA32_APICBASE, l, h); + enabled_via_apicbase = 1; + } } return apic_verify(); } @@ -2112,10 +2116,12 @@ static void lapic_resume(void) * FIXME! This will be wrong if we ever support suspend on * SMP! We'll need to do this as part of the CPU restore! 
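amd_get_mmconfig_range(), added in the amd_nb.c hunk earlier in this series, decodes the family-10h MMIO configuration MSR into a resource. A standalone sketch of that decode with the field layout spelled out; the bit positions (enable in bit 0, bus-range exponent in bits 5:2, base address from bit 20 up) follow my reading of the MSR layout and should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

#define MMIO_CONF_ENABLE		(1ULL << 0)
#define MMIO_CONF_BUSRANGE_SHIFT	2
#define MMIO_CONF_BUSRANGE_MASK		0xfULL
#define MMIO_CONF_BASE_MASK		0xfffffffULL	/* bits 47:20 */
#define MMIO_CONF_BASE_SHIFT		20

int main(void)
{
	uint64_t msr = 0xe0000021ULL;	/* example value, not from real hardware */
	unsigned int segn_busn_bits;
	uint64_t base, end;

	if (!(msr & MMIO_CONF_ENABLE)) {
		printf("mmconfig disabled\n");
		return 0;
	}
	base = msr & (MMIO_CONF_BASE_MASK << MMIO_CONF_BASE_SHIFT);
	segn_busn_bits = (msr >> MMIO_CONF_BUSRANGE_SHIFT) & MMIO_CONF_BUSRANGE_MASK;
	end = base + (1ULL << (segn_busn_bits + 20)) - 1;

	/* prints: base 0xe0000000 end 0xefffffff (256 buses, 1 MiB each) */
	printf("base 0x%llx end 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)end);
	return 0;
}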
*/ - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; - wrmsr(MSR_IA32_APICBASE, l, h); + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, l, h); + } } maxlvt = lapic_get_maxlvt(); diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index f5373dfde21..db4f70492ea 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -20,12 +20,19 @@ static int set_x2apic_phys_mode(char *arg) } early_param("x2apic_phys", set_x2apic_phys_mode); +static bool x2apic_fadt_phys(void) +{ + if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { + printk(KERN_DEBUG "System requires x2apic physical mode\n"); + return true; + } + return false; +} + static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { - if (x2apic_phys) - return x2apic_enabled(); - else - return 0; + return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys()); } static void @@ -108,7 +115,7 @@ static void init_x2apic_ldr(void) static int x2apic_phys_probe(void) { - if (x2apic_mode && x2apic_phys) + if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys())) return 1; return apic == &apic_x2apic_phys; diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index cfeb978f49f..874c2087714 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -779,7 +779,12 @@ void __init uv_system_init(void) for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) uv_possible_blades += hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); - printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades()); + + /* uv_num_possible_blades() is really the hub count */ + printk(KERN_INFO "UV: Found %d blades, %d hubs\n", + is_uv1_hub() ? uv_num_possible_blades() : + (uv_num_possible_blades() + 1) / 2, + uv_num_possible_blades()); bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades(); uv_blade_info = kzalloc(bytes, GFP_KERNEL); diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 6042981d030..0e3a82a41a6 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -15,6 +15,7 @@ CFLAGS_common.o := $(nostackp) obj-y := intel_cacheinfo.o scattered.o topology.o obj-y += proc.o capflags.o powerflags.o common.o obj-y += vmware.o hypervisor.o sched.o mshyperv.o +obj-y += rdrand.o obj-$(CONFIG_X86_32) += bugs.o obj-$(CONFIG_X86_64) += bugs_64.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index b13ed393dfc..3f4b6dacf1c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -146,7 +146,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) { -#ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? */ if (!c->cpu_index) return; @@ -190,7 +189,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) valid_k7: ; -#endif } static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) @@ -556,6 +554,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* + * The way access filter has a performance penalty on some workloads. + * Disable it on the affected CPUs. 
+ */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { + u64 val; + + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { + val |= 0x1E; + checking_wrmsrl(0xc0011021, val); + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 22a073d7fbf..1579ab92d80 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -675,9 +676,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) if (this_cpu->c_early_init) this_cpu->c_early_init(c); -#ifdef CONFIG_SMP c->cpu_index = 0; -#endif filter_cpuid_features(c, false); setup_smep(c); @@ -760,10 +759,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) c->apicid = c->initial_apicid; # endif #endif - -#ifdef CONFIG_X86_HT c->phys_proc_id = c->initial_apicid; -#endif } setup_smep(c); @@ -857,6 +853,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) #endif init_hypervisor(c); + x86_init_rdrand(c); /* * Clear/Set all flags overriden by options, need do it diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index ed6086eedf1..e0dc0005456 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -179,7 +179,6 @@ static void __cpuinit trap_init_f00f_bug(void) static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) { -#ifdef CONFIG_SMP /* calling is from identify_secondary_cpu() ? */ if (!c->cpu_index) return; @@ -196,7 +195,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) WARN_ONCE(1, "WARNING: SMP operation may be unreliable" "with B stepping processors.\n"); } -#endif } static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index c105c533ed9..fde44284cf2 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -330,8 +330,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3) l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } -static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, - int index) +static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) { static struct amd_l3_cache *__cpuinitdata l3_caches; int node; @@ -748,14 +747,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) #ifdef CONFIG_SMP -static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) + +static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) { - struct _cpuid4_info *this_leaf, *sibling_leaf; - unsigned long num_threads_sharing; - int index_msb, i, sibling; + struct _cpuid4_info *this_leaf; + int ret, i, sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); - if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { + ret = 0; + if (index == 3) { + ret = 1; for_each_cpu(i, cpu_llc_shared_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; @@ -766,8 +767,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) set_bit(sibling, this_leaf->shared_cpu_map); } } - return; + } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) { + ret = 1; + for_each_cpu(i, cpu_sibling_mask(cpu)) { + if (!per_cpu(ici_cpuid4_info, i)) + continue; + this_leaf = CPUID4_INFO_IDX(i, index); + for_each_cpu(sibling, 
cpu_sibling_mask(cpu)) { + if (!cpu_online(sibling)) + continue; + set_bit(sibling, this_leaf->shared_cpu_map); + } + } } + + return ret; +} + +static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) +{ + struct _cpuid4_info *this_leaf, *sibling_leaf; + unsigned long num_threads_sharing; + int index_msb, i; + struct cpuinfo_x86 *c = &cpu_data(cpu); + + if (c->x86_vendor == X86_VENDOR_AMD) { + if (cache_shared_amd_cpu_map_setup(cpu, index)) + return; + } + this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c index 1e8d66c1336..362190bd9e1 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -101,15 +101,19 @@ static struct severity { }; /* - * If the EIPV bit is set, it means the saved IP is the - * instruction which caused the MCE. + * If mcgstatus indicated that ip/cs on the stack were + * no good, then "m->cs" will be zero and we will have + * to assume the worst case (IN_KERNEL) as we actually + * have no idea what we were executing when the machine + * check hit. + * If we do have a good "m->cs" (or a faked one in the + * case we were executing in VM86 mode) we can use it to + * distinguish an exception taken in user from from one + * taken in the kernel. */ static int error_context(struct mce *m) { - if (m->mcgstatus & MCG_STATUS_EIPV) - return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL; - /* Unknown, assume kernel */ - return IN_KERNEL; + return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL; } int mce_severity(struct mce *a, int tolerant, char **msg) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index ff1ae9b6464..1396edf20b9 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -122,9 +122,7 @@ void mce_setup(struct mce *m) m->time = get_seconds(); m->cpuvendor = boot_cpu_data.x86_vendor; m->cpuid = cpuid_eax(1); -#ifdef CONFIG_SMP m->socketid = cpu_data(m->extcpu).phys_proc_id; -#endif m->apicid = cpu_data(m->extcpu).initial_apicid; rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap); } @@ -453,6 +451,13 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) { m->ip = regs->ip; m->cs = regs->cs; + /* + * When in VM86 mode make the cs look like ring 3 + * always. This is a lie, but it's better than passing + * the additional vm86 bit around everywhere. 
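The mce-severity.c change above derives the error context purely from the saved CS, relying on mce_get_rip() to fake ring 3 into cs for VM86 frames, as the adjoining mce.c hunk notes. A standalone illustration of that ring check:

#include <stdio.h>

enum err_context { IN_KERNEL, IN_USER };

/* Same test as the reworked error_context(): ring 3 in the saved CS
 * means user context; anything else, including a zeroed cs when the
 * stacked values were unusable, is treated as kernel. */
static enum err_context error_context_demo(unsigned int cs)
{
	return ((cs & 3) == 3) ? IN_USER : IN_KERNEL;
}

int main(void)
{
	/* prints: 0 1 0 */
	printf("%d %d %d\n",
	       error_context_demo(0x10),	/* typical kernel CS */
	       error_context_demo(0x33),	/* typical 64-bit user CS */
	       error_context_demo(0));		/* cs unknown, assume kernel */
	return 0;
}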
+ */ + if (v8086_mode(regs)) + m->cs |= 3; } else { m->ip = 0; m->cs = 0; @@ -990,6 +995,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) */ add_taint(TAINT_MACHINE_CHECK); + mce_get_rip(&m, regs); severity = mce_severity(&m, tolerant, NULL); /* @@ -1028,7 +1034,6 @@ void do_machine_check(struct pt_regs *regs, long error_code) if (severity == MCE_AO_SEVERITY && mce_usable_address(&m)) mce_ring_add(m.addr >> PAGE_SHIFT); - mce_get_rip(&m, regs); mce_log(&m); if (severity > worst) { diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index bb0adad3514..b97aa72702f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -52,6 +52,7 @@ struct threshold_block { unsigned int cpu; u32 address; u16 interrupt_enable; + bool interrupt_capable; u16 threshold_limit; struct kobject kobj; struct list_head miscj; @@ -64,11 +65,9 @@ struct threshold_bank { }; static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks); -#ifdef CONFIG_SMP static unsigned char shared_bank[NR_BANKS] = { 0, 0, 0, 0, 1 }; -#endif static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ @@ -86,6 +85,21 @@ struct thresh_restart { u16 old_limit; }; +static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits) +{ + /* + * bank 4 supports APIC LVT interrupts implicitly since forever. + */ + if (bank == 4) + return true; + + /* + * IntP: interrupt present; if this bit is set, the thresholding + * bank can generate APIC LVT interrupts + */ + return msr_high_bits & BIT(28); +} + static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi) { int msr = (hi & MASK_LVTOFF_HI) >> 20; @@ -107,8 +121,10 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi) return 1; }; -/* must be called with correct cpu affinity */ -/* Called via smp_call_function_single() */ +/* + * Called via smp_call_function_single(), must be called with correct + * cpu affinity. + */ static void threshold_restart_bank(void *_tr) { struct thresh_restart *tr = _tr; @@ -131,6 +147,12 @@ static void threshold_restart_bank(void *_tr) (new_count & THRESHOLD_MAX); } + /* clear IntType */ + hi &= ~MASK_INT_TYPE_HI; + + if (!tr->b->interrupt_capable) + goto done; + if (tr->set_lvt_off) { if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) { /* set new lvt offset */ @@ -139,9 +161,10 @@ static void threshold_restart_bank(void *_tr) } } - tr->b->interrupt_enable ? 
- (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : - (hi &= ~MASK_INT_TYPE_HI); + if (tr->b->interrupt_enable) + hi |= INT_TYPE_APIC; + + done: hi |= MASK_COUNT_EN_HI; wrmsr(tr->b->address, lo, hi); @@ -202,18 +225,21 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) if (!block) per_cpu(bank_map, cpu) |= (1 << bank); -#ifdef CONFIG_SMP + if (shared_bank[bank] && c->cpu_core_id) break; -#endif - offset = setup_APIC_mce(offset, - (high & MASK_LVTOFF_HI) >> 20); memset(&b, 0, sizeof(b)); - b.cpu = cpu; - b.bank = bank; - b.block = block; - b.address = address; + b.cpu = cpu; + b.bank = bank; + b.block = block; + b.address = address; + b.interrupt_capable = lvt_interrupt_supported(bank, high); + + if (b.interrupt_capable) { + int new = (high & MASK_LVTOFF_HI) >> 20; + offset = setup_APIC_mce(offset, new); + } mce_threshold_block_init(&b, offset); mce_threshold_vector = amd_threshold_interrupt; @@ -313,6 +339,9 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size) struct thresh_restart tr; unsigned long new; + if (!b->interrupt_capable) + return -EINVAL; + if (strict_strtoul(buf, 0, &new) < 0) return -EINVAL; @@ -471,6 +500,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu, b->cpu = cpu; b->address = address; b->interrupt_enable = 0; + b->interrupt_capable = lvt_interrupt_supported(bank, high); b->threshold_limit = THRESHOLD_MAX; INIT_LIST_HEAD(&b->miscj); diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 27c625178bf..99cd9d2abb6 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -322,17 +322,6 @@ device_initcall(thermal_throttle_init_device); #endif /* CONFIG_SYSFS */ -/* - * Set up the most two significant bit to notify mce log that this thermal - * event type. - * This is a temp solution. May be changed in the future with mce log - * infrasture. 
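 * (For reference, the scheme being removed here tagged the logged
 * MSR value with an event type in its two most significant bits,
 * e.g. a package power-limit event was logged as
 *
 *	mce_log_therm_throt_event(PACKAGE_POWER_LIMIT | msr_val);
 *
 * with PACKAGE_POWER_LIMIT = (__u64)3 << 62. After this change only
 * the core-level THERMAL_THROTTLING event is logged at all, and it
 * carries the raw, untagged msr_val.)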
- */ -#define CORE_THROTTLED (0) -#define CORE_POWER_LIMIT ((__u64)1 << 62) -#define PACKAGE_THROTTLED ((__u64)2 << 62) -#define PACKAGE_POWER_LIMIT ((__u64)3 << 62) - static void notify_thresholds(__u64 msr_val) { /* check whether the interrupt handler is defined; @@ -362,27 +351,23 @@ static void intel_thermal_interrupt(void) if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, CORE_LEVEL) != 0) - mce_log_therm_throt_event(CORE_THROTTLED | msr_val); + mce_log_therm_throt_event(msr_val); if (this_cpu_has(X86_FEATURE_PLN)) - if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, + therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - CORE_LEVEL) != 0) - mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); + CORE_LEVEL); if (this_cpu_has(X86_FEATURE_PTS)) { rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); - if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, + therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, THERMAL_THROTTLING_EVENT, - PACKAGE_LEVEL) != 0) - mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); + PACKAGE_LEVEL); if (this_cpu_has(X86_FEATURE_PLN)) - if (therm_throt_process(msr_val & + therm_throt_process(msr_val & PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, - PACKAGE_LEVEL) != 0) - mce_log_therm_throt_event(PACKAGE_POWER_LIMIT - | msr_val); + PACKAGE_LEVEL); } } diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219..4b50c965f0e 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -437,6 +437,7 @@ static __initconst const struct x86_pmu amd_pmu = { * 0x023 DE PERF_CTL[2:0] * 0x02D LS PERF_CTL[3] * 0x02E LS PERF_CTL[3,0] + * 0x031 LS PERF_CTL[2:0] (**) * 0x043 CU PERF_CTL[2:0] * 0x045 CU PERF_CTL[2:0] * 0x046 CU PERF_CTL[2:0] @@ -450,10 +451,12 @@ static __initconst const struct x86_pmu amd_pmu = { * 0x0DD LS PERF_CTL[5:0] * 0x0DE LS PERF_CTL[5:0] * 0x0DF LS PERF_CTL[5:0] + * 0x1C0 EX PERF_CTL[5:3] * 0x1D6 EX PERF_CTL[5:0] * 0x1D8 EX PERF_CTL[5:0] * - * (*) depending on the umask all FPU counters may be used + * (*) depending on the umask all FPU counters may be used + * (**) only one unitmask enabled at a time */ static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); @@ -503,6 +506,12 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev return &amd_f15_PMC3; case 0x02E: return &amd_f15_PMC30; + case 0x031: + if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) + return &amd_f15_PMC20; + return &emptyconstraint; + case 0x1C0: + return &amd_f15_PMC53; default: return &amd_f15_PMC50; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index d812fe2d02b..c81d1f89df0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -754,6 +754,16 @@ static void intel_ds_init(void) } } +void perf_restore_debug_store(void) +{ + struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); + + if (!x86_pmu.bts && !x86_pmu.pebs) + return; + + wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds); +} + #else /* CONFIG_CPU_SUP_INTEL */ static void reserve_ds_buffers(void) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 62ac8cb6ba2..72c365a1240 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct 
seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu = 0; + unsigned int cpu; int i; -#ifdef CONFIG_SMP cpu = c->cpu_index; -#endif seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c new file mode 100644 index 00000000000..feca286c2bb --- /dev/null +++ b/arch/x86/kernel/cpu/rdrand.c @@ -0,0 +1,73 @@ +/* + * This file is part of the Linux kernel. + * + * Copyright (c) 2011, Intel Corporation + * Authors: Fenghua Yu , + * H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include +#include +#include + +static int __init x86_rdrand_setup(char *s) +{ + setup_clear_cpu_cap(X86_FEATURE_RDRAND); + return 1; +} +__setup("nordrand", x86_rdrand_setup); + +/* We can't use arch_get_random_long() here since alternatives haven't run */ +static inline int rdrand_long(unsigned long *v) +{ + int ok; + asm volatile("1: " RDRAND_LONG "\n\t" + "jc 2f\n\t" + "decl %0\n\t" + "jnz 1b\n\t" + "2:" + : "=r" (ok), "=a" (*v) + : "0" (RDRAND_RETRY_LOOPS)); + return ok; +} + +/* + * Force a reseed cycle; we are architecturally guaranteed a reseed + * after no more than 512 128-bit chunks of random data. This also + * acts as a test of the CPU capability. 
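 * As a rough sanity check of the constant below: 512 chunks of 128
 * bits is 8 KiB of output, and on 64-bit
 *
 *	RESEED_LOOP = (512 * 128) / sizeof(unsigned long)
 *	            = 65536 / 8 = 8192
 *
 * iterations of rdrand_long() pull 8192 * 8 = 64 KiB, comfortably
 * more than the guaranteed reseed window.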
+ */ +#define RESEED_LOOP ((512*128)/sizeof(unsigned long)) + +void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_ARCH_RANDOM + unsigned long tmp; + int i, count, ok; + + if (!cpu_has(c, X86_FEATURE_RDRAND)) + return; /* Nothing to do */ + + for (count = i = 0; i < RESEED_LOOP; i++) { + ok = rdrand_long(&tmp); + if (ok) + count++; + } + + if (count != RESEED_LOOP) + clear_cpu_cap(c, X86_FEATURE_RDRAND); +#endif +} diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index c7f64e6f537..ea6106c5ef7 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) const struct cpuid_bit *cb; static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { - { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 }, + { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5c1a9197491..2df12522aff 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -98,12 +98,6 @@ #endif .endm -#ifdef CONFIG_VM86 -#define resume_userspace_sig check_userspace -#else -#define resume_userspace_sig resume_userspace -#endif - /* * User gs save/restore * @@ -327,10 +321,19 @@ ret_from_exception: preempt_stop(CLBR_ANY) ret_from_intr: GET_THREAD_INFO(%ebp) -check_userspace: +resume_userspace_sig: +#ifdef CONFIG_VM86 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax +#else + /* + * We can be coming here from a syscall done in the kernel space, + * e.g. a failed kernel_execve(). + */ + movl PT_CS(%esp), %eax + andl $SEGMENT_RPL_MASK, %eax +#endif cmpl $USER_RPL, %eax jb resume_kernel # not returning to v8086 or userspace @@ -1026,7 +1029,7 @@ ENTRY(xen_sysenter_target) ENTRY(xen_hypervisor_callback) CFI_STARTPROC - pushl_cfi $0 + pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL TRACE_IRQS_OFF @@ -1068,14 +1071,15 @@ ENTRY(xen_failsafe_callback) 2: mov 8(%esp),%es 3: mov 12(%esp),%fs 4: mov 16(%esp),%gs + /* EAX == 0 => Category 1 (Bad segment) + EAX != 0 => Category 2 (Bad IRET) */ testl %eax,%eax popl_cfi %eax lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f - addl $16,%esp - jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) -5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) + jmp iret_exc +5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp ret_from_exception CFI_ENDPROC diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8a445a0c989..dd4dba4fadc 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1308,7 +1308,7 @@ ENTRY(xen_failsafe_callback) CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 - pushq_cfi $0 + pushq_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp error_exit CFI_ENDPROC diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index af0699ba48c..f6c467484eb 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -5,8 +5,6 @@ #include #include -#define BIOS_LOWMEM_KILOBYTES 0x413 - /* * The BIOS places the EBDA/XBDA at the top of conventional * memory, and usually decreases the reported amount of @@ -16,17 +14,30 @@ * chipset: reserve a page before VGA to prevent PCI prefetch * into it (errata #56). 
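 * (The conventional-memory figure itself comes from the BIOS data
 * area: the 16-bit word at 0x413, BIOS_LOWMEM_KILOBYTES, holds the
 * usable low memory in KiB, so the read boils down to roughly
 *
 *	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
 *	lowmem <<= 10;
 *
 * i.e. a KiB count converted to a byte address, to which the sanity
 * checks added in this patch are then applied.)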
Usually the page is reserved anyways, * unless you have no PS/2 mouse plugged in. + * + * This functions is deliberately very conservative. Losing + * memory in the bottom megabyte is rarely a problem, as long + * as we have enough memory to install the trampoline. Using + * memory that is in use by the BIOS or by some DMA device + * the BIOS didn't shut down *is* a big problem. */ + +#define BIOS_LOWMEM_KILOBYTES 0x413 +#define LOWMEM_CAP 0x9f000U /* Absolute maximum */ +#define INSANE_CUTOFF 0x20000U /* Less than this = insane */ + void __init reserve_ebda_region(void) { unsigned int lowmem, ebda_addr; - /* To determine the position of the EBDA and the */ - /* end of conventional memory, we need to look at */ - /* the BIOS data area. In a paravirtual environment */ - /* that area is absent. We'll just have to assume */ - /* that the paravirt case can handle memory setup */ - /* correctly, without our help. */ + /* + * To determine the position of the EBDA and the + * end of conventional memory, we need to look at + * the BIOS data area. In a paravirtual environment + * that area is absent. We'll just have to assume + * that the paravirt case can handle memory setup + * correctly, without our help. + */ if (paravirt_enabled()) return; @@ -37,19 +48,23 @@ void __init reserve_ebda_region(void) /* start of EBDA area */ ebda_addr = get_bios_ebda(); - /* Fixup: bios puts an EBDA in the top 64K segment */ - /* of conventional memory, but does not adjust lowmem. */ - if ((lowmem - ebda_addr) <= 0x10000) - lowmem = ebda_addr; + /* + * Note: some old Dells seem to need 4k EBDA without + * reporting so, so just consider the memory above 0x9f000 + * to be off limits (bugzilla 2990). + */ + + /* If the EBDA address is below 128K, assume it is bogus */ + if (ebda_addr < INSANE_CUTOFF) + ebda_addr = LOWMEM_CAP; - /* Fixup: bios does not report an EBDA at all. */ - /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */ - if ((ebda_addr == 0) && (lowmem >= 0x9f000)) - lowmem = 0x9f000; + /* If lowmem is less than 128K, assume it is bogus */ + if (lowmem < INSANE_CUTOFF) + lowmem = LOWMEM_CAP; - /* Paranoia: should never happen, but... 
*/ - if ((lowmem == 0) || (lowmem >= 0x100000)) - lowmem = 0x9f000; + /* Use the lower of the lowmem and EBDA markers as the cutoff */ + lowmem = min(lowmem, ebda_addr); + lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */ /* reserve all memory between lowmem and the 1MB mark */ memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index aa083d35074..0aa649ee563 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -427,7 +427,7 @@ void hpet_msi_unmask(struct irq_data *data) /* unmask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg |= HPET_TN_FSB; + cfg |= HPET_TN_ENABLE | HPET_TN_FSB; hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } @@ -438,7 +438,7 @@ void hpet_msi_mask(struct irq_data *data) /* mask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); - cfg &= ~HPET_TN_FSB; + cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(hdev->num)); } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 6c0802eb2f7..a669961bb15 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -159,10 +159,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { u64 sum = atomic_read(&irq_err_count); - -#ifdef CONFIG_X86_IO_APIC - sum += atomic_read(&irq_mis_count); -#endif return sum; } diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5f9ecff328b..fc1f48dc998 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -43,6 +43,8 @@ #include #include #include +#include +#include #include #include @@ -710,6 +712,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) regs->ip = ip; } +int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) +{ + int err; + char opc[BREAK_INSTR_SIZE]; + + bpt->type = BP_BREAKPOINT; + err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + BREAK_INSTR_SIZE); + if (err) + return err; + err = probe_kernel_write((char *)bpt->bpt_addr, + arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); +#ifdef CONFIG_DEBUG_RODATA + if (!err) + return err; + /* + * It is safe to call text_poke() because normal kernel execution + * is stopped on all cores, so long as the text_mutex is not locked. + */ + if (mutex_is_locked(&text_mutex)) + return -EBUSY; + text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, + BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + if (err) + return err; + if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) + return -EINVAL; + bpt->type = BP_POKE_BREAKPOINT; +#endif /* CONFIG_DEBUG_RODATA */ + return err; +} + +int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) +{ +#ifdef CONFIG_DEBUG_RODATA + int err; + char opc[BREAK_INSTR_SIZE]; + + if (bpt->type != BP_POKE_BREAKPOINT) + goto knl_write; + /* + * It is safe to call text_poke() because normal kernel execution + * is stopped on all cores, so long as the text_mutex is not locked. 
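 * With CONFIG_DEBUG_RODATA the kernel text is mapped read-only, so a
 * plain probe_kernel_write() of the saved bytes fails; text_poke()
 * instead patches them through a temporary writable alias of the
 * page. The breakpoint being removed is the one-byte INT3 opcode
 * declared in arch_kgdb_ops further down:
 *
 *	.gdb_bpt_instr = { 0xcc },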
+ */ + if (mutex_is_locked(&text_mutex)) + goto knl_write; + text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); + err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); + if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) + goto knl_write; + return err; +knl_write: +#endif /* CONFIG_DEBUG_RODATA */ + return probe_kernel_write((char *)bpt->bpt_addr, + (char *)bpt->saved_instr, BREAK_INSTR_SIZE); +} + struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index c5610384ab1..53ab9ff25a8 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -162,6 +162,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 #define F15H_MPB_MAX_SIZE 4096 +#define F16H_MPB_MAX_SIZE 3458 switch (c->x86) { case 0x14: @@ -170,6 +171,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) case 0x15: max_size = F15H_MPB_MAX_SIZE; break; + case 0x16: + max_size = F16H_MPB_MAX_SIZE; + break; default: max_size = F1XH_MPB_MAX_SIZE; break; @@ -298,13 +302,33 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) return state; } +/* + * AMD microcode firmware naming convention, up to family 15h they are in + * the legacy file: + * + * amd-ucode/microcode_amd.bin + * + * This legacy file is always smaller than 2K in size. + * + * Starting at family 15h they are in family specific firmware files: + * + * amd-ucode/microcode_amd_fam15h.bin + * amd-ucode/microcode_amd_fam16h.bin + * ... + * + * These might be larger than 2K. + */ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { - const char *fw_name = "amd-ucode/microcode_amd.bin"; + char fw_name[36] = "amd-ucode/microcode_amd.bin"; const struct firmware *fw; enum ucode_state ret = UCODE_NFOUND; + struct cpuinfo_x86 *c = &cpu_data(cpu); + + if (c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); - if (request_firmware(&fw, fw_name, device)) { + if (request_firmware(&fw, (const char *)fw_name, device)) { pr_err("failed to load file %s\n", fw_name); goto out; } diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index f9242800bc8..c4e246541c5 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t size) { unsigned long val; - int cpu = dev->id; - int ret = 0; - char *end; + int cpu; + ssize_t ret = 0, tmp_ret; - val = simple_strtoul(buf, &end, 0); - if (end == buf) + /* allow reload only from the BSP */ + if (boot_cpu_data.cpu_index != dev->id) return -EINVAL; - if (val == 1) { - get_online_cpus(); - if (cpu_online(cpu)) - ret = reload_for_cpu(cpu); - put_online_cpus(); + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + if (val != 1) + return size; + + get_online_cpus(); + for_each_online_cpu(cpu) { + tmp_ret = reload_for_cpu(cpu); + if (tmp_ret != 0) + pr_warn("Error reloading microcode on CPU %d\n", cpu); + + /* save retval of the first encountered reload error */ + if (!ret) + ret = tmp_ret; } + put_online_cpus(); if (!ret) ret = size; diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 12fcbe2c143..f7d1a649a5b 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -175,6 +175,9 @@ static int 
msr_open(struct inode *inode, struct file *file) unsigned int cpu; struct cpuinfo_x86 *c; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + cpu = iminor(file->f_path.dentry->d_inode); if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 869e1aeeb71..704faba97c1 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -253,6 +253,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -282,18 +294,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return percpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -462,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e1ba8cb24e4..427250211f3 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -341,34 +341,10 @@ void (*pm_idle)(void); EXPORT_SYMBOL(pm_idle); #endif -#ifdef CONFIG_X86_32 -/* - * This halt magic was a workaround for ancient floppy DMA - * wreckage. It should be safe to remove. - */ -static int hlt_counter; -void disable_hlt(void) -{ - hlt_counter++; -} -EXPORT_SYMBOL(disable_hlt); - -void enable_hlt(void) -{ - hlt_counter--; -} -EXPORT_SYMBOL(enable_hlt); - -static inline int hlt_use_halt(void) -{ - return (!hlt_counter && boot_cpu_data.hlt_works_ok); -} -#else static inline int hlt_use_halt(void) { return 1; } -#endif /* * We use this if we don't have any better diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index a3d0dc59067..fcdb1b34aa1 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -293,22 +293,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); - bool preload_fpu; + fpu_switch_t fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ - /* - * If the task has used fpu the last 5 timeslices, just do a full - * restore of the math state immediately to avoid the trap; the - * chances of needing FPU soon are obviously high now - */ - preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; - - __unlazy_fpu(prev_p); - - /* we're going to use this soon, after a few expensive things */ - if (preload_fpu) - prefetch(next->fpu.state); + fpu = switch_fpu_prepare(prev_p, next_p); /* * Reload esp0. @@ -348,11 +337,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); - /* If we're going to preload the fpu context, make sure clts - is run while we're batching the cpu state updates. */ - if (preload_fpu) - clts(); - /* * Leave lazy mode, flushing any hypercalls made here. 
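 * (Under a paravirt hypervisor "leave lazy mode" is the point where
 * the batch opened by arch_start_context_switch() earlier in the
 * context-switch path is submitted to the hypervisor in one go. The
 * new .flush hook added to pv_mmu_ops.lazy_mode above gives lazy MMU
 * mode an equivalent "push the queue out now" operation; as
 * paravirt_flush_lazy_mmu() shows, it simply leaves and immediately
 * re-enters lazy MMU mode.)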
* This must be done before restoring TLS segments so @@ -362,15 +346,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ arch_end_context_switch(next_p); - if (preload_fpu) - __math_state_restore(); - /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) lazy_load_gs(next->gs); + switch_fpu_finish(next_p, fpu); + percpu_write(current_task, next_p); return prev_p; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ca6f7ab8df3..b01898d2744 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -377,18 +377,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); unsigned fsindex, gsindex; - bool preload_fpu; + fpu_switch_t fpu; - /* - * If the task has used fpu the last 5 timeslices, just do a full - * restore of the math state immediately to avoid the trap; the - * chances of needing FPU soon are obviously high now - */ - preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; - - /* we're going to use this soon, after a few expensive things */ - if (preload_fpu) - prefetch(next->fpu.state); + fpu = switch_fpu_prepare(prev_p, next_p); /* * Reload esp0, LDT and the page table pointer: @@ -418,13 +409,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) load_TLS(next, cpu); - /* Must be after DS reload */ - __unlazy_fpu(prev_p); - - /* Make sure cpu is ready for new context */ - if (preload_fpu) - clts(); - /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so @@ -465,6 +449,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) wrmsrl(MSR_KERNEL_GS_BASE, next->gs); prev->gsindex = gsindex; + switch_fpu_finish(next_p, fpu); + /* * Switch the PDA and FPU contexts. */ @@ -483,13 +469,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) __switch_to_xtra(prev_p, next_p, tss); - /* - * Preload the FPU context, now that we've determined that the - * task is likely to be using it. - */ - if (preload_fpu) - __math_state_restore(); - return prev_p; } diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 807c2a2b80f..911e16d8dfa 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -164,6 +165,35 @@ static inline bool invalid_selector(u16 value) #define FLAG_MASK FLAG_MASK_32 +/* + * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode + * when it traps. The previous stack will be directly underneath the saved + * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. + * + * Now, if the stack is empty, '®s->sp' is out of range. In this + * case we try to take the previous stack. To always return a non-null + * stack pointer we fall back to regs as stack if no previous stack + * exists. + * + * This is valid only for kernel mode traps. 
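 * The check below relies on THREAD_SIZE being a power of two: masking
 * an address with ~(THREAD_SIZE - 1) yields the base of the kernel
 * stack it lives on, so
 *
 *	((unsigned long)regs & ~(THREAD_SIZE - 1)) ==
 *	        (sp & ~(THREAD_SIZE - 1))
 *
 * is true exactly when '&regs->sp' still points into the current
 * thread stack. The "previous stack" case covers traps taken on a
 * separate stack (an interrupt stack, for instance), whose
 * thread_info keeps the interrupted stack pointer in ->previous_esp.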
+ */ +unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)®s->sp; + struct thread_info *tinfo; + + if (context == (sp & ~(THREAD_SIZE - 1))) + return sp; + + tinfo = (struct thread_info *)context; + if (tinfo->previous_esp) + return tinfo->previous_esp; + + return (unsigned long)regs; +} +EXPORT_SYMBOL_GPL(kernel_stack_pointer); + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index d4a705f2283..89d68777f73 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -452,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), }, }, + { /* Handle problems with rebooting on the Precision M6600. */ + .callback = set_pci_reboot, + .ident = "Dell OptiPlex 990", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), + }, + }, { } }; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index afaf38447ef..6c4e9ffc290 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -631,6 +631,83 @@ static __init void reserve_ibft_region(void) static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; +static bool __init snb_gfx_workaround_needed(void) +{ +#ifdef CONFIG_PCI + int i; + u16 vendor, devid; + static const u16 snb_ids[] = { + 0x0102, + 0x0112, + 0x0122, + 0x0106, + 0x0116, + 0x0126, + 0x010a, + }; + + /* Assume no if something weird is going on with PCI */ + if (!early_pci_allowed()) + return false; + + vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID); + if (vendor != 0x8086) + return false; + + devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID); + for (i = 0; i < ARRAY_SIZE(snb_ids); i++) + if (devid == snb_ids[i]) + return true; +#endif + + return false; +} + +/* + * Sandy Bridge graphics has trouble with certain ranges, exclude + * them from allocation. + */ +static void __init trim_snb_memory(void) +{ + static const unsigned long bad_pages[] = { + 0x20050000, + 0x20110000, + 0x20130000, + 0x20138000, + 0x40004000, + }; + int i; + + if (!snb_gfx_workaround_needed()) + return; + + printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n"); + + /* + * Reserve all memory below the 1 MB mark that has not + * already been reserved. + */ + memblock_reserve(0, 1<<20); + + for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { + if (memblock_reserve(bad_pages[i], PAGE_SIZE)) + printk(KERN_WARNING "failed to reserve 0x%08lx\n", + bad_pages[i]); + } +} + +/* + * Here we put platform-specific memory range workarounds, i.e. + * memory known to be corrupt or otherwise in need to be reserved on + * specific platforms. + * + * If this gets used more widely it could use a real dispatch mechanism. + */ +static void __init trim_platform_memory_ranges(void) +{ + trim_snb_memory(); +} + static void __init trim_bios_range(void) { /* @@ -651,6 +728,7 @@ static void __init trim_bios_range(void) * take them out. 
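 * (BIOS_BEGIN and BIOS_END are 0xa0000 and 0x100000, so the
 * e820_remove_range() below strips E820_RAM from the 384 KiB legacy
 * VGA/option-ROM/BIOS window; the sanitize_e820_map() call added
 * right after it then merges and re-sorts the remaining entries.)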
*/ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } @@ -929,6 +1007,8 @@ void __init setup_arch(char **cmdline_p) setup_trampolines(); + trim_platform_memory_ranges(); + init_gbpages(); /* max_pfn_mapped is updated here */ @@ -937,8 +1017,21 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { - max_pfn_mapped = init_memory_mapping(1UL<<32, - max_pfn<addr + ei->size <= 1UL << 32) + continue; + + if (ei->type == E820_RESERVED) + continue; + + max_pfn_mapped = init_memory_mapping( + ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr, + ei->addr + ei->size); + } + /* can we preseve max_low_pfn ?*/ max_low_pfn = max_pfn; } diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 71f4727da37..5a98aa27218 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void) #endif rc = -EINVAL; if (pcpu_chosen_fc != PCPU_FC_PAGE) { - const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE; const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; + size_t atom_size; + /* + * On 64bit, use PMD_SIZE for atom_size so that embedded + * percpu areas are aligned to PMD. This, in the future, + * can also allow using PMD mappings in vmalloc area. Use + * PAGE_SIZE on 32bit as vmalloc space is highly contended + * and large vmalloc area allocs can easily fail. + */ +#ifdef CONFIG_X86_64 + atom_size = PMD_SIZE; +#else + atom_size = PAGE_SIZE; +#endif rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size, atom_size, pcpu_cpu_distance, diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 6bb7b8579e7..bcfec2d2376 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -163,7 +163,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset, { const struct desc_struct *tls; - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; @@ -198,7 +198,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; const struct user_desc *info; - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || (count % sizeof(struct user_desc)) != 0) return -EINVAL; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b9b67166f9d..1b26e01047b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -717,25 +717,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) } /* - * __math_state_restore assumes that cr0.TS is already clear and the - * fpu state is all ready for use. Used during context switch. + * This gets called with the process already owning the + * FPU state, and with CR0.TS cleared. It just needs to + * restore the FPU register state. */ -void __math_state_restore(void) +void __math_state_restore(struct task_struct *tsk) { - struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = thread->task; + /* We need a safe address that is cheap to find and that is already + in L1. 
We've just brought in "tsk->thread.has_fpu", so use that */ +#define safe_address (tsk->thread.has_fpu) + + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception + is pending. Clear the x87 state here by setting it to fixed + values. safe_address is a random variable that should be in L1 */ + alternative_input( + ASM_NOP8 ASM_NOP2, + "emms\n\t" /* clear stack tags */ + "fildl %P[addr]", /* set F?P to defined value */ + X86_FEATURE_FXSAVE_LEAK, + [addr] "m" (safe_address)); /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. */ if (unlikely(restore_fpu_checking(tsk))) { - stts(); + __thread_fpu_end(tsk); force_sig(SIGSEGV, tsk); return; } - - thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ - tsk->fpu_counter++; } /* @@ -745,13 +754,12 @@ void __math_state_restore(void) * Careful.. There are problems with IBM-designed IRQ13 behaviour. * Don't touch unless you *really* know how it works. * - * Must be called with kernel preemption disabled (in this case, - * local interrupts are disabled at the call-site in entry.S). + * Must be called with kernel preemption disabled (eg with local + * local interrupts as in the case of do_device_not_available). */ -asmlinkage void math_state_restore(void) +void math_state_restore(void) { - struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = thread->task; + struct task_struct *tsk = current; if (!tsk_used_math(tsk)) { local_irq_enable(); @@ -768,9 +776,10 @@ asmlinkage void math_state_restore(void) local_irq_disable(); } - clts(); /* Allow maths ops (or we recurse) */ + __thread_fpu_begin(tsk); + __math_state_restore(tsk); - __math_state_restore(); + tsk->fpu_counter++; } EXPORT_SYMBOL_GPL(math_state_restore); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 6cc6922262a..4406c038a0a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) if (cpu_khz) { *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; - *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR); + *offset = ns_now - mult_frac(tsc_now, *scale, + (1UL << CYC2NS_SCALE_FACTOR)); } sched_clock_idle_wakeup_event(0); @@ -956,6 +957,16 @@ static int __init init_tsc_clocksource(void) clocksource_tsc.rating = 0; clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; } + + /* + * Trust the results of the earlier calibration on systems + * exporting a reliable TSC. 
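 * X86_FEATURE_TSC_RELIABLE is a synthetic flag set by platform or
 * hypervisor detection code for systems whose TSC is guaranteed to
 * be constant and trustworthy; on those, the tsc_khz value from the
 * early calibration is registered as-is and the delayed
 * refined-calibration work (tsc_irqwork, scheduled just below for
 * everyone else) is skipped.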
+ */ + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { + clocksource_register_khz(&clocksource_tsc, tsc_khz); + return 0; + } + schedule_delayed_work(&tsc_irqwork, 0); return 0; } diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 863f8753ab0..04b87269edf 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) spinlock_t *ptl; int i; + down_write(&mm->mmap_sem); pgd = pgd_offset(mm, 0xA0000); if (pgd_none_or_clear_bad(pgd)) goto out; @@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) } pte_unmap_unlock(pte, ptl); out: + up_write(&mm->mmap_sem); flush_tlb(); } diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index a3911343976..71109111411 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk) if (!fx) return; - BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU); + BUG_ON(__thread_has_fpu(tsk)); xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv; @@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf) if (!used_math()) return 0; - if (task_thread_info(tsk)->status & TS_USEDFPU) { + if (user_has_fpu()) { if (use_xsave()) err = xsave_user(buf); else @@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf) if (err) return err; - task_thread_info(tsk)->status &= ~TS_USEDFPU; - stts(); + user_fpu_end(); } else { sanitize_i387_state(tsk); if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, @@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf) return err; } - if (!(task_thread_info(current)->status & TS_USEDFPU)) { - clts(); - task_thread_info(current)->status |= TS_USEDFPU; - } + user_fpu_begin(); if (use_xsave()) err = restore_user_xstate(buf); else diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index adc98675cda..3e7d9138dd2 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1901,6 +1901,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ss->p = 1; } +static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) +{ + struct x86_emulate_ops *ops = ctxt->ops; + u32 eax, ebx, ecx, edx; + + /* + * syscall should always be enabled in longmode - so only become + * vendor specific (cpuid) if other modes are active... + */ + if (ctxt->mode == X86EMUL_MODE_PROT64) + return true; + + eax = 0x00000000; + ecx = 0x00000000; + if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { + /* + * Intel ("GenuineIntel") + * remark: Intel CPUs only support "syscall" in 64bit + * longmode. Also an 64bit guest with a + * 32bit compat-app running will #UD !! While this + * behaviour can be fixed (by emulating) into AMD + * response - CPUs of AMD can't behave like Intel. + */ + if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && + ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && + edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) + return false; + + /* AMD ("AuthenticAMD") */ + if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && + ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && + edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return true; + + /* AMD ("AMDisbetter!") */ + if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && + ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && + edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) + return true; + } + + /* default: (not Intel, not AMD), apply Intel's stricter rules... 
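 * For reference, the vendor constants compared above are simply the
 * CPUID leaf 0 vendor string split across the three registers, e.g.
 *
 *	"GenuineIntel":  ebx 0x756e6547  edx 0x49656e69  ecx 0x6c65746e
 *	"AuthenticAMD":  ebx 0x68747541  edx 0x69746e65  ecx 0x444d4163
 *
 * which is presumably how the X86EMUL_CPUID_VENDOR_* macros are
 * defined.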
*/ + return false; +} + static int emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { @@ -1915,9 +1960,15 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); + if (!(em_syscall_is_enabled(ctxt))) + return emulate_ud(ctxt); + ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, ops, &cs, &ss); + if (!(efer & EFER_SCE)) + return emulate_ud(ctxt); + ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index efad7238505..43e04d128af 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) +static void create_pit_timer(struct kvm *kvm, u32 val, int is_period) { + struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state; struct kvm_timer *pt = &ps->pit_timer; s64 interval; + if (!irqchip_in_kernel(kvm)) + return; + interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); pr_debug("create pit timer, interval is %llu nsec\n", interval); @@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val) /* FIXME: enhance mode 4 precision */ case 4: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) { - create_pit_timer(ps, val, 0); + create_pit_timer(kvm, val, 0); } break; case 2: case 3: if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){ - create_pit_timer(ps, val, 1); + create_pit_timer(kvm, val, 1); } break; default: diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d48ec60ea42..be1d830ca98 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -948,7 +948,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif - if (current_thread_info()->status & TS_USEDFPU) + if (__thread_has_fpu(current)) clts(); load_gdt(&__get_cpu_var(host_gdt)); } @@ -3836,6 +3836,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (err != EMULATE_DONE) return 0; + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + ret = kvm_emulate_halt(vcpu); + goto out; + } + if (signal_pending(current)) goto out; if (need_resched()) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 77c9d8673dc..15e79a61372 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -575,6 +575,9 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; + if (!cpu_has_xsave) + return 0; + best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); } @@ -1070,7 +1073,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags; struct kvm_vcpu_arch *vcpu = &v->arch; - void *shared_kaddr; unsigned long this_tsc_khz; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp; @@ -1106,7 +1108,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_restore(flags); - if (!vcpu->time_page) + if (!vcpu->pv_time_enabled) return 0; /* @@ -1164,14 +1166,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ vcpu->hv_clock.version += 2; - shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); - - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); - - kunmap_atomic(shared_kaddr, KM_USER0); - - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); + kvm_write_guest_cached(v->kvm, 
&vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock)); return 0; } @@ -1451,7 +1448,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1461,10 +1459,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - if (vcpu->arch.time_page) { - kvm_release_page_dirty(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + vcpu->arch.pv_time_enabled = false; } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) @@ -1524,6 +1519,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { + u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1533,16 +1529,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (!(data & 1)) break; - /* ...but clean it before doing the actual write */ - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); - - vcpu->arch.time_page = - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + gpa_offset = data & ~(PAGE_MASK | 1); - if (is_error_page(vcpu->arch.time_page)) { - kvm_release_page_clean(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) + vcpu->arch.pv_time_enabled = false; + else + vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: @@ -3410,6 +3404,9 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; + r = -EINVAL; + if (atomic_read(&kvm->online_vcpus)) + goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { @@ -4407,6 +4404,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); } +static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) +{ + struct kvm_cpuid_entry2 *cpuid = NULL; + + if (eax && ecx) + cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), + *eax, *ecx); + + if (cpuid) { + *eax = cpuid->eax; + *ecx = cpuid->ecx; + if (ebx) + *ebx = cpuid->ebx; + if (edx) + *edx = cpuid->edx; + return true; + } + + return false; +} + static struct x86_emulate_ops emulate_ops = { .read_std = kvm_read_guest_virt_system, .write_std = kvm_write_guest_virt_system, @@ -4437,6 +4456,7 @@ static struct x86_emulate_ops emulate_ops = { .get_fpu = emulator_get_fpu, .put_fpu = emulator_put_fpu, .intercept = emulator_intercept, + .get_cpuid = emulator_get_cpuid, }; static void cache_all_regs(struct kvm_vcpu *vcpu) @@ -5828,6 +5848,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int pending_vec, max_bits, idx; struct desc_ptr dt; + if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) + return -EINVAL; + dt.size = sregs->idt.limit; dt.address = sregs->idt.base; kvm_x86_ops->set_idt(vcpu, &dt); @@ -6093,12 +6116,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) if (r == 0) r = kvm_mmu_setup(vcpu); vcpu_put(vcpu); - if (r < 0) - goto free_vcpu; - return 0; -free_vcpu: - kvm_x86_ops->vcpu_free(vcpu); return r; } @@ -6171,6 +6189,11 @@ void kvm_arch_check_processor_compat(void *rtn) kvm_x86_ops->check_processor_compatibility(rtn); } +bool 
kvm_vcpu_compatible(struct kvm_vcpu *vcpu) +{ + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); +} + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { struct page *page; @@ -6220,6 +6243,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; + vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); return 0; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index db832fd65ec..2d452471b4a 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1309,6 +1309,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index fc45ba887d0..e395693abdb 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops) } /* TSC based delay: */ -static void delay_tsc(unsigned long loops) +static void delay_tsc(unsigned long __loops) { - unsigned long bclock, now; + u32 bclock, now, loops = __loops; int cpu; preempt_disable(); diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b7c2849ffb6..554b7b528f0 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -169,10 +169,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) char c; unsigned zero_len; - for (; len; --len) { + for (; len; --len, to++) { if (__get_user_nocheck(c, from++, sizeof(char))) break; - if (__put_user_nocheck(c, to++, sizeof(char))) + if (__put_user_nocheck(c, to, sizeof(char))) break; } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2dbf6bf4c7e..7653f14aba2 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -376,10 +376,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables @@ -720,12 +722,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (is_errata100(regs, address)) return; - if (unlikely(show_unhandled_signals)) + /* Kernel addresses are always protection faults: */ + if (address >= TASK_SIZE) + error_code |= PF_PROT; + + if (likely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); - /* Kernel addresses are always protection faults: */ tsk->thread.cr2 = address; - tsk->thread.error_code = error_code | (address >= TASK_SIZE); + tsk->thread.error_code = error_code; tsk->thread.trap_no = 14; force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f581a18c0d4..df7d12c9af2 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) } /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. 
While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. */ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; + pte_t *pte; if (!vma_shareable(vma, addr)) - return; + return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); out: + pte = (pte_t *)pmd_alloc(mm, pud, addr); mutex_unlock(&mapping->i_mmap_mutex); + return pte; } /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + pte = huge_pmd_share(mm, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 87488b93a65..96c4577c5cb 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -28,37 +28,56 @@ int direct_gbpages #endif ; -static void __init find_early_table_space(unsigned long end, int use_pse, - int use_gbpages) +struct map_range { + unsigned long start; + unsigned long end; + unsigned page_size_mask; +}; + +/* + * First calculate space needed for kernel direct mapping page tables to cover + * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB + * pages. Then find enough contiguous space for those page tables. 
+ */ +static void __init find_early_table_space(struct map_range *mr, int nr_range) { - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; + int i; + unsigned long puds = 0, pmds = 0, ptes = 0, tables; + unsigned long start = 0, good_end; + unsigned long pgd_extra = 0; phys_addr_t base; - puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; - tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); - - if (use_gbpages) { - unsigned long extra; + for (i = 0; i < nr_range; i++) { + unsigned long range, extra; - extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); - pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; - } else - pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; + if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT)) + pgd_extra++; - tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); + range = mr[i].end - mr[i].start; + puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; - if (use_pse) { - unsigned long extra; + if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { + extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); + pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; + } else { + pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; + } - extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); + if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { + extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); #ifdef CONFIG_X86_32 - extra += PMD_SIZE; + extra += PMD_SIZE; #endif - ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; - } else - ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; + ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; + } else { + ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; + } + } + tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); + tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); + tables += (pgd_extra * PAGE_SIZE); #ifdef CONFIG_X86_32 /* for fixmap */ @@ -74,8 +93,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse, pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); - printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); + printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", + mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, + (pgt_buf_top << PAGE_SHIFT) - 1); } void __init native_pagetable_reserve(u64 start, u64 end) @@ -83,12 +103,6 @@ void __init native_pagetable_reserve(u64 start, u64 end) memblock_x86_reserve_range(start, end, "PGTABLE"); } -struct map_range { - unsigned long start; - unsigned long end; - unsigned page_size_mask; -}; - #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ @@ -260,7 +274,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * nodes are discovered. 
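 * (The reservation below is only needed for the early, boot-time
 * calls: once after_bootmem is set, init_memory_mapping() runs for
 * cases like memory hotplug and the page-table pages come from the
 * normal page allocator instead of this pre-reserved window.)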
*/ if (!after_bootmem) - find_early_table_space(end, use_pse, use_gbpages); + find_early_table_space(mr, nr_range); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index bbaaa005bf0..44b93da1840 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr) if (pud_none(*pud)) return 0; + if (pud_large(*pud)) + return pfn_valid(pud_pfn(*pud)); + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 1dab5194fd9..f927429d07c 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void) */ if (current->flags & PF_RANDOMIZE) { if (mmap_is_ia32()) - rnd = (long)get_random_int() % (1<<8); + rnd = get_random_int() % (1<<8); else - rnd = (long)(get_random_int() % (1<<28)); + rnd = get_random_int() % (1<<28); } return rnd << PAGE_SHIFT; } diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index f5510d889a2..469ccae3149 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -207,9 +207,6 @@ static void __init setup_node_data(int nid, u64 start, u64 end) if (end && (end - start) < NODE_MIN_SIZE) return; - /* initialize remap allocator before aligning to ZONE_ALIGN */ - init_alloc_remap(nid, start, end); - start = roundup(start, ZONE_ALIGN); printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 849a975d3fa..025d469f28d 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, extern unsigned long highend_pfn, highstart_pfn; -#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) - -static void *node_remap_start_vaddr[MAX_NUMNODES]; -void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); - -/* - * Remap memory allocator - */ -static unsigned long node_remap_start_pfn[MAX_NUMNODES]; -static void *node_remap_end_vaddr[MAX_NUMNODES]; -static void *node_remap_alloc_vaddr[MAX_NUMNODES]; - -/** - * alloc_remap - Allocate remapped memory - * @nid: NUMA node to allocate memory from - * @size: The size of allocation - * - * Allocate @size bytes from the remap area of NUMA node @nid. The - * size of the remap area is predetermined by init_alloc_remap() and - * only the callers considered there should call this function. For - * more info, please read the comment on top of init_alloc_remap(). - * - * The caller must be ready to handle allocation failure from this - * function and fall back to regular memory allocator in such cases. - * - * CONTEXT: - * Single CPU early boot context. - * - * RETURNS: - * Pointer to the allocated memory on success, %NULL on failure. 
- */ -void *alloc_remap(int nid, unsigned long size) -{ - void *allocation = node_remap_alloc_vaddr[nid]; - - size = ALIGN(size, L1_CACHE_BYTES); - - if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) - return NULL; - - node_remap_alloc_vaddr[nid] += size; - memset(allocation, 0, size); - - return allocation; -} - -#ifdef CONFIG_HIBERNATION -/** - * resume_map_numa_kva - add KVA mapping to the temporary page tables created - * during resume from hibernation - * @pgd_base - temporary resume page directory - */ -void resume_map_numa_kva(pgd_t *pgd_base) -{ - int node; - - for_each_online_node(node) { - unsigned long start_va, start_pfn, nr_pages, pfn; - - start_va = (unsigned long)node_remap_start_vaddr[node]; - start_pfn = node_remap_start_pfn[node]; - nr_pages = (node_remap_end_vaddr[node] - - node_remap_start_vaddr[node]) >> PAGE_SHIFT; - - printk(KERN_DEBUG "%s: node %d\n", __func__, node); - - for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { - unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); - pgd_t *pgd = pgd_base + pgd_index(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); - pmd_t *pmd = pmd_offset(pud, vaddr); - - set_pmd(pmd, pfn_pmd(start_pfn + pfn, - PAGE_KERNEL_LARGE_EXEC)); - - printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", - __func__, vaddr, start_pfn + pfn); - } - } -} -#endif - -/** - * init_alloc_remap - Initialize remap allocator for a NUMA node - * @nid: NUMA node to initizlie remap allocator for - * - * NUMA nodes may end up without any lowmem. As allocating pgdat and - * memmap on a different node with lowmem is inefficient, a special - * remap allocator is implemented which can be used by alloc_remap(). - * - * For each node, the amount of memory which will be necessary for - * pgdat and memmap is calculated and two memory areas of the size are - * allocated - one in the node and the other in lowmem; then, the area - * in the node is remapped to the lowmem area. - * - * As pgdat and memmap must be allocated in lowmem anyway, this - * doesn't waste lowmem address space; however, the actual lowmem - * which gets remapped over is wasted. The amount shouldn't be - * problematic on machines this feature will be used. - * - * Initialization failure isn't fatal. alloc_remap() is used - * opportunistically and the callers will fall back to other memory - * allocation mechanisms on failure. - */ -void __init init_alloc_remap(int nid, u64 start, u64 end) -{ - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long end_pfn = end >> PAGE_SHIFT; - unsigned long size, pfn; - u64 node_pa, remap_pa; - void *remap_va; - - /* - * The acpi/srat node info can show hot-add memroy zones where - * memory could be added but not currently present. 
- */ - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", - nid, start_pfn, end_pfn); - - /* calculate the necessary space aligned to large page size */ - size = node_memmap_size_bytes(nid, start_pfn, end_pfn); - size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); - size = ALIGN(size, LARGE_PAGE_BYTES); - - /* allocate node memory and the lowmem remap area */ - node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); - if (node_pa == MEMBLOCK_ERROR) { - pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", - size, nid); - return; - } - memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); - - remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, - max_low_pfn << PAGE_SHIFT, - size, LARGE_PAGE_BYTES); - if (remap_pa == MEMBLOCK_ERROR) { - pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", - size, nid); - memblock_x86_free_range(node_pa, node_pa + size); - return; - } - memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); - remap_va = phys_to_virt(remap_pa); - - /* perform actual remap */ - for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) - set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), - (node_pa >> PAGE_SHIFT) + pfn, - PAGE_KERNEL_LARGE); - - /* initialize remap allocator parameters */ - node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; - node_remap_start_vaddr[nid] = remap_va; - node_remap_end_vaddr[nid] = remap_va + size; - node_remap_alloc_vaddr[nid] = remap_va; - - printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", - nid, node_pa, node_pa + size, remap_va, remap_va + size); -} - void __init initmem_init(void) { x86_numa_init(); diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index 7178c3afe05..ad86ec91e64 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h @@ -21,12 +21,6 @@ void __init numa_reset_distance(void); void __init x86_numa_init(void); -#ifdef CONFIG_X86_64 -static inline void init_alloc_remap(int nid, u64 start, u64 end) { } -#else -void __init init_alloc_remap(int nid, u64 start, u64 end); -#endif - #ifdef CONFIG_NUMA_EMU void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt); diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index 81dbfdeb080..7efd0c615d5 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c @@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) return; pxm = pa->proximity_domain_lo; + if (acpi_srat_revision >= 2) + pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8; node = setup_node(pxm); if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm); @@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) start = ma->base_address; end = start + ma->length; pxm = ma->proximity_domain; + if (acpi_srat_revision <= 1) + pxm &= 0xff; node = setup_node(pxm); if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains.\n"); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 7b65f752c5f..5a5b6e4dd73 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp) cleanup_addr = proglen; /* epilogue address */ for (pass = 0; pass < 10; pass++) { + u8 seen_or_pass0 = (pass == 0) ? 
(SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; - if (seen) { + if (seen_or_pass0) { EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ /* note : must save %rbx in case bpf_error is hit */ - if (seen & (SEEN_XREG | SEEN_DATAREF)) + if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ - if (seen & SEEN_XREG) + if (seen_or_pass0 & SEEN_XREG) CLEAR_X(); /* make sure we dont leek kernel memory */ /* @@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp) * r9 = skb->len - skb->data_len * r8 = skb->data */ - if (seen & SEEN_DATAREF) { + if (seen_or_pass0 & SEEN_DATAREF) { if (offsetof(struct sk_buff, len) <= 127) /* mov off8(%rdi),%r9d */ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); @@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_DIV_X: /* A /= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (pc_ret0 != -1) - EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4)); - else { + if (pc_ret0 > 0) { + /* addrs[pc_ret0 - 1] is start address of target + * (addrs[i] - 4) is the address following this jmp + * ("xor %edx,%edx; div %ebx" being 4 bytes long) + */ + EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - + (addrs[i] - 4)); + } else { EMIT_COND_JMP(X86_JNE, 2 + 5); CLEAR_A(); EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ @@ -283,7 +289,7 @@ void bpf_jit_compile(struct sk_filter *fp) EMIT2(0x24, K & 0xFF); /* and imm8,%al */ } else if (K >= 0xFFFF0000) { EMIT2(0x66, 0x25); /* and imm16,%ax */ - EMIT2(K, 2); + EMIT(K, 2); } else { EMIT1_off32(0x25, K); /* and imm32,%eax */ } @@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp) } /* fallinto */ case BPF_S_RET_A: - if (seen) { + if (seen_or_pass0) { if (i != flen - 1) { EMIT_JMP(cleanup_addr - addrs[i]); break; } - if (seen & SEEN_XREG) + if (seen_or_pass0 & SEEN_XREG) EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ EMIT1(0xc9); /* leaveq */ } @@ -469,8 +475,10 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_LD_W_ABS: func = sk_load_word; common_load: seen |= SEEN_DATAREF; - if ((int)K < 0) + if ((int)K < 0) { + /* Abort the JIT because __load_pointer() is needed. */ goto out; + } t_offset = func - (image + addrs[i]); EMIT1_off32(0xbe, K); /* mov imm32,%esi */ EMIT1_off32(0xe8, t_offset); /* call */ @@ -483,13 +491,8 @@ common_load: seen |= SEEN_DATAREF; goto common_load; case BPF_S_LDX_B_MSH: if ((int)K < 0) { - if (pc_ret0 != -1) { - EMIT_JMP(addrs[pc_ret0] - addrs[i]); - break; - } - CLEAR_A(); - EMIT_JMP(cleanup_addr - addrs[i]); - break; + /* Abort the JIT because __load_pointer() is needed. 
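A small standalone illustration of the displacement arithmetic behind the new addrs[pc_ret0 - 1] comment above; rel_disp and the sample offsets are invented for the example.

#include <stdint.h>
#include <stdio.h>

/*
 * x86 conditional jumps encode a displacement relative to the end of the
 * jump instruction.  The JIT records in addrs[i] the offset of the code
 * that follows BPF insn i, and here the jump sits right before the 4-byte
 * "xor %edx,%edx; div %ebx" pair, so the jump ends at addrs[i] - 4 and the
 * value handed to EMIT_COND_JMP() is target_start - (addrs[i] - 4),
 * exactly as the added comment in the hunk spells out.
 */
static int32_t rel_disp(uint32_t target_start, uint32_t end_of_jump)
{
	return (int32_t)(target_start - end_of_jump);
}

int main(void)
{
	uint32_t addrs_i = 64;	/* offset just past insn i's generated code */
	uint32_t target = 40;	/* start of the shared "return 0" block     */

	printf("je displacement: %d\n", rel_disp(target, addrs_i - 4));
	return 0;
}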
*/ + goto out; } seen |= SEEN_DATAREF | SEEN_XREG; t_offset = sk_load_byte_msh - (image + addrs[i]); @@ -599,13 +602,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; * use it to give the cleanup instruction(s) addr */ cleanup_addr = proglen - 1; /* ret */ - if (seen) + if (seen_or_pass0) cleanup_addr -= 1; /* leaveq */ - if (seen & SEEN_XREG) + if (seen_or_pass0 & SEEN_XREG) cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ if (image) { - WARN_ON(proglen != oldproglen); + if (proglen != oldproglen) + pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); break; } if (proglen == oldproglen) { diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 68894fdc034..a00c588b69d 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, val |= counter_config->extra; event &= model->event_mask ? model->event_mask : 0xFF; val |= event & 0xFF; - val |= (event & 0x0F00) << 24; + val |= (u64)(event & 0x0F00) << 24; return val; } diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile index 6b8759f7634..d24d3da7292 100644 --- a/arch/x86/pci/Makefile +++ b/arch/x86/pci/Makefile @@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o obj-$(CONFIG_X86_MRST) += mrst.o obj-y += common.o early.o -obj-y += amd_bus.o bus_numa.o +obj-y += bus_numa.o +obj-$(CONFIG_AMD_NB) += amd_bus.o obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o ifeq ($(CONFIG_PCI_DEBUG),y) diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 50b3f14c59a..0473a8f9350 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -54,6 +54,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */ + { + .callback = set_use_crs, + .ident = "MSI MS-7253", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7253"), + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"), + }, + }, {} }; @@ -149,7 +159,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data) struct acpi_resource_address64 addr; acpi_status status; unsigned long flags; - u64 start, end; + u64 start, orig_end, end; status = resource_to_addr(acpi_res, &addr); if (!ACPI_SUCCESS(status)) @@ -165,7 +175,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data) return AE_OK; start = addr.minimum + addr.translation_offset; - end = addr.maximum + addr.translation_offset; + orig_end = end = addr.maximum + addr.translation_offset; + + /* Exclude non-addressable range or non-addressable portion of range */ + end = min(end, (u64)iomem_resource.end); + if (end <= start) { + dev_info(&info->bridge->dev, + "host bridge window [%#llx-%#llx] " + "(ignored, not CPU addressable)\n", start, orig_end); + return AE_OK; + } else if (orig_end != end) { + dev_info(&info->bridge->dev, + "host bridge window [%#llx-%#llx] " + "([%#llx-%#llx] ignored, not CPU addressable)\n", + start, orig_end, end + 1, orig_end); + } res = &info->res[info->res_num]; res->name = info->name; diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 026e4931d16..385a940b542 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = { { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 }, }; -static u64 __initdata fam10h_mmconf_start; -static u64 __initdata fam10h_mmconf_end; -static 
void __init get_pci_mmcfg_amd_fam10h_range(void) -{ - u32 address; - u64 base, msr; - unsigned segn_busn_bits; - - /* assume all cpus from fam10h have mmconf */ - if (boot_cpu_data.x86 < 0x10) - return; - - address = MSR_FAM10H_MMIO_CONF_BASE; - rdmsrl(address, msr); - - /* mmconfig is not enable */ - if (!(msr & FAM10H_MMIO_CONF_ENABLE)) - return; - - base = msr & (FAM10H_MMIO_CONF_BASE_MASK<> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & - FAM10H_MMIO_CONF_BUSRANGE_MASK; - - fam10h_mmconf_start = base; - fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1; -} - #define RANGE_NUM 16 /** @@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void) u64 val; u32 address; bool found; + struct resource fam10h_mmconf_res, *fam10h_mmconf; + u64 fam10h_mmconf_start; + u64 fam10h_mmconf_end; if (!early_pci_allowed()) return -1; @@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void) subtract_range(range, RANGE_NUM, 0, end); /* get mmconfig */ - get_pci_mmcfg_amd_fam10h_range(); + fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res); /* need to take out mmconf range */ - if (fam10h_mmconf_end) { - printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end); + if (fam10h_mmconf) { + printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf); + fam10h_mmconf_start = fam10h_mmconf->start; + fam10h_mmconf_end = fam10h_mmconf->end; subtract_range(range, RANGE_NUM, fam10h_mmconf_start, fam10h_mmconf_end + 1); + } else { + fam10h_mmconf_start = 0; + fam10h_mmconf_end = 0; } /* mmio resource */ diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 6dd89555fbf..0951b81ea90 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar); + +/* + * Twinhead H12Y needs us to block out a region otherwise we map devices + * there and any access kills the box. 
+ * + * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231 + * + * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor) + */ +static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev) +{ + if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) { + pr_info("Reserving memory on Twinhead H12Y\n"); + request_mem_region(0xFFB00000, 0x100000, "twinhead"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index f567965c062..6e96e65e7ca 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -308,7 +308,7 @@ int __init pci_xen_init(void) int __init pci_xen_hvm_init(void) { - if (!xen_feature(XENFEAT_hvm_pirqs)) + if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) return 0; #ifdef CONFIG_ACPI diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index ac3aa54e265..0fba86da589 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -38,7 +38,7 @@ #include #include -static pgd_t save_pgd __initdata; +static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; static void __init early_code_mapping_set_exec(int executable) @@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable) void __init efi_call_phys_prelog(void) { unsigned long vaddress; + int pgd; + int n_pgds; early_code_mapping_set_exec(1); local_irq_save(efi_flags); - vaddress = (unsigned long)__va(0x0UL); - save_pgd = *pgd_offset_k(0x0UL); - set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); + + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL); + + for (pgd = 0; pgd < n_pgds; pgd++) { + save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); + vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); + } __flush_tlb_all(); } @@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void) /* * After the lock is released, the original page table is restored. 
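The efi_call_phys_prelog()/epilog() change above moves from saving a single PGD slot to saving every slot that covers physical memory (n_pgds of them) and restoring them all afterwards. A compact userspace model of that save/alias/restore pattern, with invented names:

#include <stdlib.h>
#include <string.h>

typedef unsigned long pgd_entry;

static pgd_entry *saved;	/* plays the role of the new save_pgd array */

/* Save the first n low entries, then alias them to the kernel's own
 * mappings so firmware can be called through a 1:1 view. */
static void prelog(pgd_entry *pgd, const pgd_entry *kernel_alias, int n)
{
	saved = malloc(n * sizeof(*saved));	/* no error handling, sketch only */
	memcpy(saved, pgd, n * sizeof(*saved));
	memcpy(pgd, kernel_alias, n * sizeof(*pgd));
}

/* Put every saved entry back and drop the save area, as the epilog does. */
static void epilog(pgd_entry *pgd, int n)
{
	memcpy(pgd, saved, n * sizeof(*pgd));
	free(saved);
	saved = NULL;
}

int main(void)
{
	pgd_entry pgd[4] = { 1, 2, 3, 4 };
	pgd_entry alias[4] = { 9, 9, 9, 9 };

	prelog(pgd, alias, 4);	/* low entries now mirror the kernel alias */
	epilog(pgd, 4);		/* original entries restored               */
	return pgd[0] == 1 ? 0 : 1;
}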
*/ - set_pgd(pgd_offset_k(0x0UL), save_pgd); + int pgd; + int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + for (pgd = 0; pgd < n_pgds; pgd++) + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); + kfree(save_pgd); __flush_tlb_all(); local_irq_restore(efi_flags); early_code_mapping_set_exec(0); diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 82cff4a25f4..edf435b74e8 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1575,14 +1575,14 @@ static int calculate_destination_timeout(void) ts_ns = base * mult1 * mult2; ret = ts_ns / 1000; } else { - /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */ - mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL); + /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */ + mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL); mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) - mult1 = 80; + base = 80; else - mult1 = 10; - base = mmr_image & UV2_ACK_MASK; + base = 10; + mult1 = mmr_image & UV2_ACK_MASK; ret = mult1 * base; } return ret; @@ -1820,6 +1820,8 @@ static int __init uv_bau_init(void) uv_base_pnode = uv_blade_to_pnode(uvhub); } + enable_timeouts(); + if (init_per_cpu(nuvhubs, uv_base_pnode)) { nobau = 1; return 0; @@ -1830,7 +1832,6 @@ static int __init uv_bau_init(void) if (uv_blade_nr_possible_cpus(uvhub)) init_uvhub(uvhub, vector, uv_base_pnode); - enable_timeouts(); alloc_intr_gate(vector, uv_bau_message_intr1); for_each_possible_blade(uvhub) { diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 87bb35e34ef..0ea8bd233de 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -224,6 +225,7 @@ static void __restore_processor_state(struct saved_context *ctxt) do_fpu_end(); mtrr_bp_restore(); + perf_restore_debug_store(); } /* Needed by apm.c */ diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 3769079874d..a09ecb9582b 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -130,8 +130,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base) } } - resume_map_numa_kva(pgd_base); - return 0; } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0fb662abceb..e11efbdb998 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -62,6 +62,7 @@ #include #include #include +#include #include "xen-ops.h" #include "mmu.h" @@ -128,6 +129,21 @@ static void xen_vcpu_setup(int cpu) BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + /* + * This path is called twice on PVHVM - first during bootup via + * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being + * hotplugged: cpu_up -> xen_hvm_cpu_notify. + * As we can only do the VCPUOP_register_vcpu_info once lets + * not over-write its result. + * + * For PV it is called during restore (xen_vcpu_restore) and bootup + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not + * use this function. + */ + if (xen_hvm_domain()) { + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) + return; + } if (cpu < MAX_VIRT_CPUS) per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; @@ -197,6 +213,9 @@ static void __init xen_banner(void) xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? 
" (preserve-AD)" : ""); } +#define CPUID_THERM_POWER_LEAF 6 +#define APERFMPERF_PRESENT 0 + static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; @@ -217,6 +236,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, maskedx = cpuid_leaf1_edx_mask; break; + case CPUID_THERM_POWER_LEAF: + /* Disabling APERFMPERF for kernel usage */ + maskecx = ~(1 << APERFMPERF_PRESENT); + break; + case 0xb: /* Suppress extended topology stuff */ maskebx = 0; @@ -794,7 +818,16 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } - +#ifdef CONFIG_X86_64 +static inline unsigned long xen_read_cr8(void) +{ + return 0; +} +static inline void xen_write_cr8(unsigned long val) +{ + BUG_ON(val); +} +#endif static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -959,6 +992,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_cr4_safe = native_read_cr4_safe, .write_cr4 = xen_write_cr4, +#ifdef CONFIG_X86_64 + .read_cr8 = xen_read_cr8, + .write_cr8 = xen_write_cr8, +#endif + .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, @@ -966,6 +1004,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, + .read_tscp = native_read_tscp, + .iret = xen_iret, .irq_enable_sysexit = xen_sysexit, #ifdef CONFIG_X86_64 @@ -1259,8 +1299,10 @@ asmlinkage void __init xen_start_kernel(void) /* Make sure ACS will be enabled */ pci_request_acs(); } - - +#ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. */ + pci_probe &= ~PCI_PROBE_BIOS; +#endif xen_raw_console_write("about to get started...\n"); xen_setup_runstate_info(0); @@ -1338,8 +1380,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: xen_vcpu_setup(cpu); - if (xen_have_vector_callback) + if (xen_have_vector_callback) { xen_init_lock_cpu(cpu); + if (xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_timer(cpu); + } break; default: break; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5f76c0acb2c..a0aed70596a 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -320,8 +320,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + unsigned long pfn = mfn_to_pfn(mfn); + pteval_t flags = val & PTE_FLAGS_MASK; - val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; + if (unlikely(pfn == ~0)) + val = flags & ~_PAGE_PRESENT; + else + val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val; @@ -2006,6 +2011,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, + .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 58efeb9d544..2f7847d1055 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -683,6 +683,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; + int ret = 0; pfn = page_to_pfn(page); if (!PageHighMem(page)) { @@ -706,6 +707,24 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); + /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in + * this domain. 
Set the FOREIGN_FRAME_BIT in the p2m for the other + * pfn so that the following mfn_to_pfn(mfn) calls will return the + * pfn from the m2p_override (the backend pfn) instead. + * We need to do this because the pages shared by the frontend + * (xen-blkfront) can be already locked (lock_page, called by + * do_read_cache_page); when the userspace backend tries to use them + * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so + * do_blockdev_direct_IO is going to try to lock the same pages + * again resulting in a deadlock. + * As a side effect get_user_pages_fast might not be safe on the + * frontend pages while they are being shared with the backend, + * because mfn_to_pfn (that ends up being called by GUPF) will + * return the backend pfn rather than the frontend pfn. */ + ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); + if (ret == 0 && get_phys_to_machine(pfn) == mfn) + set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)); + return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); @@ -717,6 +736,7 @@ int m2p_remove_override(struct page *page, bool clear_pte) unsigned long uninitialized_var(address); unsigned level; pte_t *ptep = NULL; + int ret = 0; pfn = page_to_pfn(page); mfn = get_phys_to_machine(pfn); @@ -743,6 +763,22 @@ int m2p_remove_override(struct page *page, bool clear_pte) /* No tlb flush necessary because the caller already * left the pte unmapped. */ + /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present + * somewhere in this domain, even before being added to the + * m2p_override (see comment above in m2p_add_override). + * If there are no other entries in the m2p_override corresponding + * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for + * the original pfn (the one shared by the frontend): the backend + * cannot do any IO on this page anymore because it has been + * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of + * the original pfn causes mfn_to_pfn(mfn) to return the frontend + * pfn again. */ + mfn &= ~FOREIGN_FRAME_BIT; + ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); + if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) && + m2p_find_override(mfn) == NULL) + set_phys_to_machine(pfn, mfn); + return 0; } EXPORT_SYMBOL_GPL(m2p_remove_override); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f8dcda49314..5669564aadf 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -463,4 +464,7 @@ void __init xen_arch_setup(void) boot_option_idle_override = IDLE_HALT; fiddle_vdso(); +#ifdef CONFIG_NUMA + numa_off = 1; +#endif } diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d4fc6d454f8..2843b5e7cf0 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -172,6 +172,7 @@ static void __init xen_fill_possible_map(void) static void __init xen_filter_cpu_maps(void) { int i, rc; + unsigned int subtract = 0; if (!xen_initial_domain()) return; @@ -186,8 +187,22 @@ static void __init xen_filter_cpu_maps(void) } else { set_cpu_possible(i, false); set_cpu_present(i, false); + subtract++; } } +#ifdef CONFIG_HOTPLUG_CPU + /* This is akin to using 'nr_cpus' on the Linux command line. + * Which is OK as when we use 'dom0_max_vcpus=X' we can only + * have up to X, while nr_cpu_ids is greater than X. 
This + * normally is not a problem, except when CPU hotplugging + * is involved and then there might be more than X CPUs + * in the guest - which will not work as there is no + * hypercall to expand the max number of VCPUs an already + * running guest has. So cap it up to X. */ + if (subtract) + nr_cpu_ids = nr_cpu_ids - subtract; +#endif + } static void __init xen_smp_prepare_boot_cpu(void) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index cc9b1e182fc..d99537fb06a 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -313,7 +313,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) if (per_cpu(lock_spinners, cpu) == xl) { ADD_STATS(released_slow_kicked, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); - break; } } } diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5158c505bef..4b0fb291555 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -482,7 +482,11 @@ static void xen_hvm_setup_cpu_clockevents(void) { int cpu = smp_processor_id(); xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); + /* + * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence + * doing it xen_hvm_cpu_notify (which gets called by smp_init during + * early bootup and also during CPU hotplug events). + */ xen_setup_cpu_clockevents(); } diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 79d7362ad6d..3e45aa00071 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct) /* check for unmasked and pending */ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending - jz 1f + jnz 1f 2: call check_events 1: ENDPATCH(xen_restore_fl_direct) diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index b040b0e518c..7328f71651e 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -88,11 +88,11 @@ ENTRY(xen_iret) */ #ifdef CONFIG_SMP GET_THREAD_INFO(%eax) - movl TI_cpu(%eax), %eax - movl __per_cpu_offset(,%eax,4), %eax - mov xen_vcpu(%eax), %eax + movl %ss:TI_cpu(%eax), %eax + movl %ss:__per_cpu_offset(,%eax,4), %eax + mov %ss:xen_vcpu(%eax), %eax #else - movl xen_vcpu, %eax + movl %ss:xen_vcpu, %eax #endif /* check IF state we're restoring */ @@ -105,11 +105,11 @@ ENTRY(xen_iret) * resuming the code, so we don't have to be worried about * being preempted to another CPU. */ - setz XEN_vcpu_info_mask(%eax) + setz %ss:XEN_vcpu_info_mask(%eax) xen_iret_start_crit: /* check for unmasked and pending */ - cmpw $0x0001, XEN_vcpu_info_pending(%eax) + cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) /* * If there's something pending, mask events again so we can @@ -117,7 +117,7 @@ xen_iret_start_crit: * touch XEN_vcpu_info_mask. */ jne 1f - movb $1, XEN_vcpu_info_mask(%eax) + movb $1, %ss:XEN_vcpu_info_mask(%eax) 1: popl %eax diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h index 633ba73bc4d..75edf8a452b 100644 --- a/arch/xtensa/include/asm/signal.h +++ b/arch/xtensa/include/asm/signal.h @@ -133,6 +133,7 @@ struct sigaction { void (*sa_restorer)(void); sigset_t sa_mask; /* mask last for extensibility */ }; +#define __ARCH_HAS_SA_RESTORER struct k_sigaction { struct sigaction sa; diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 3199b76f795..3f6e6cc7520 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -21,6 +21,17 @@ config IOSCHED_DEADLINE a new point in the service tree and doing a batch of IO from there in case of expiry. 
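The ROW additions that begin below boil down to a set of priority queues served round-robin, each allowed up to its quantum per cycle before the dispatcher moves on. A stripped-down model of that rule follows; the queue names and quanta are only illustrative, the real defaults live in row_queues_def further down.

#include <stdio.h>

#define NQ 3

struct q {
	const char *name;
	int pending;		/* requests waiting             */
	int quantum;		/* max dispatches per RR cycle  */
	int dispatched;		/* dispatched in current cycle  */
};

/* Stay on the current queue until its quantum is used up or it runs dry,
 * then move to the next; return -1 once nothing is pending anywhere. */
static int pick(struct q *qs, int cur)
{
	int total = 0;

	for (int i = 0; i < NQ; i++)
		total += qs[i].pending;
	if (!total)
		return -1;

	for (;;) {
		struct q *c = &qs[cur];

		if (c->pending && c->dispatched < c->quantum)
			return cur;
		c->dispatched = 0;		/* quantum expired or queue empty */
		cur = (cur + 1) % NQ;
	}
}

int main(void)
{
	struct q qs[NQ] = {
		{ "high_read", 5, 3, 0 },	/* reads get the big quantum */
		{ "swrite",    5, 1, 0 },
		{ "write",     5, 1, 0 },
	};
	int cur = 0, idx;

	while ((idx = pick(qs, cur)) >= 0) {
		qs[idx].pending--;
		qs[idx].dispatched++;
		cur = idx;
		printf("dispatch from %s\n", qs[idx].name);
	}
	return 0;
}

The real scheduler adds one refinement on top of this: row_dispatch_requests() first preempts in favour of higher-priority queues that were left unserved in the current cycle before applying the quantum rule.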
+config IOSCHED_ROW + tristate "ROW I/O scheduler" + default y + ---help--- + The ROW I/O scheduler gives priority to READ requests over the + WRITE requests when dispatching, without starving WRITE requests. + Requests are kept in priority queues. Dispatching is done in a RR + manner when the dispatch quantum for each queue is calculated + according to queue priority. + Most suitable for mobile devices. + config IOSCHED_CFQ tristate "CFQ I/O scheduler" # If BLK_CGROUP is a module, CFQ has to be built as module. @@ -53,6 +64,16 @@ choice config DEFAULT_DEADLINE bool "Deadline" if IOSCHED_DEADLINE=y + config DEFAULT_ROW + bool "ROW" if IOSCHED_ROW=y + help + The ROW I/O scheduler gives priority to READ requests + over the WRITE requests when dispatching, without starving + WRITE requests. Requests are kept in priority queues. + Dispatching is done in a RR manner when the dispatch quantum + for each queue is defined according to queue priority. + Most suitable for mobile devices. + config DEFAULT_CFQ bool "CFQ" if IOSCHED_CFQ=y @@ -64,6 +85,7 @@ endchoice config DEFAULT_IOSCHED string default "deadline" if DEFAULT_DEADLINE + default "row" if DEFAULT_ROW default "cfq" if DEFAULT_CFQ default "noop" if DEFAULT_NOOP diff --git a/block/Makefile b/block/Makefile index 0fec4b3fab5..824ae82cd20 100644 --- a/block/Makefile +++ b/block/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_ROW) += row-iosched.o obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o diff --git a/block/blk-core.c b/block/blk-core.c index 64cc22daac7..0b62cf8f2a0 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -296,13 +296,26 @@ EXPORT_SYMBOL(blk_sync_queue); * Description: * See @blk_run_queue. This variant must be called with the queue lock * held and interrupts disabled. + * Device driver will be notified of an urgent request + * pending under the following conditions: + * 1. The driver and the current scheduler support urgent reques handling + * 2. There is an urgent request pending in the scheduler + * 3. There isn't already an urgent request in flight, meaning previously + * notified urgent request completed (!q->notified_urgent) */ void __blk_run_queue(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; - q->request_fn(q); + if (!q->notified_urgent && + q->elevator->elevator_type->ops.elevator_is_urgent_fn && + q->urgent_request_fn && + q->elevator->elevator_type->ops.elevator_is_urgent_fn(q)) { + q->notified_urgent = true; + q->urgent_request_fn(q); + } else + q->request_fn(q); } EXPORT_SYMBOL(__blk_run_queue); @@ -929,6 +942,50 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) } EXPORT_SYMBOL(blk_requeue_request); +/** + * blk_reinsert_request() - Insert a request back to the scheduler + * @q: request queue + * @rq: request to be inserted + * + * This function inserts the request back to the scheduler as if + * it was never dispatched. 
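The urgent-request plumbing spread across __blk_run_queue(), blk_fetch_request() and elv_completed_request() in this area amounts to a two-flag handshake. A compact model of the flow, using stand-in names rather than the real block-layer types:

#include <stdbool.h>
#include <stdio.h>

struct queue_model {
	bool notified_urgent;	/* driver was told an urgent request is pending */
	bool dispatched_urgent;	/* that urgent request has been handed out      */
	bool urgent_pending;	/* scheduler reports an urgent request queued   */
};

static void run_queue(struct queue_model *q)
{
	/* Notify at most one urgent request at a time, mirroring the
	 * !q->notified_urgent test in __blk_run_queue(). */
	if (!q->notified_urgent && q->urgent_pending) {
		q->notified_urgent = true;
		printf("urgent_request_fn()\n");
	} else {
		printf("request_fn()\n");
	}
}

static void fetch_request(struct queue_model *q)
{
	/* The next request fetched after the notification is assumed to be
	 * the urgent one and gets marked as such. */
	if (q->notified_urgent && !q->dispatched_urgent)
		q->dispatched_urgent = true;
}

static void complete_request(struct queue_model *q, bool was_urgent)
{
	/* Completion of the marked request re-arms the mechanism. */
	if (was_urgent) {
		q->notified_urgent = false;
		q->dispatched_urgent = false;
	}
}

int main(void)
{
	struct queue_model q = { false, false, true };

	run_queue(&q);			/* urgent path taken        */
	fetch_request(&q);		/* request marked urgent    */
	q.urgent_pending = false;
	complete_request(&q, true);
	run_queue(&q);			/* back to the normal path  */
	return 0;
}

In the patch the "was_urgent" information is carried on the request itself through the REQ_ATOM_URGENT atomic flag rather than passed as an argument.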
+ * + * Return: 0 on success, error code on fail + */ +int blk_reinsert_request(struct request_queue *q, struct request *rq) +{ + if (unlikely(!rq) || unlikely(!q)) + return -EIO; + + blk_delete_timer(rq); + blk_clear_rq_complete(rq); + trace_block_rq_requeue(q, rq); + + if (blk_rq_tagged(rq)) + blk_queue_end_tag(q, rq); + + BUG_ON(blk_queued_rq(rq)); + + return elv_reinsert_request(q, rq); +} +EXPORT_SYMBOL(blk_reinsert_request); + +/** + * blk_reinsert_req_sup() - check whether the scheduler supports + * reinsertion of requests + * @q: request queue + * + * Returns true if the current scheduler supports reinserting + * request. False otherwise + */ +bool blk_reinsert_req_sup(struct request_queue *q) +{ + if (unlikely(!q)) + return false; + return q->elevator->elevator_type->ops.elevator_reinsert_req_fn ? true : false; +} +EXPORT_SYMBOL(blk_reinsert_req_sup); + static void add_acct_request(struct request_queue *q, struct request *rq, int where) { @@ -1972,8 +2029,17 @@ struct request *blk_fetch_request(struct request_queue *q) struct request *rq; rq = blk_peek_request(q); - if (rq) + if (rq) { + /* + * Assumption: the next request fetched from scheduler after we + * notified "urgent request pending" - will be the urgent one + */ + if (q->notified_urgent && !q->dispatched_urgent) { + q->dispatched_urgent = true; + (void)blk_mark_rq_urgent(rq); + } blk_start_request(rq); + } return rq; } EXPORT_SYMBOL(blk_fetch_request); diff --git a/block/blk-settings.c b/block/blk-settings.c index fa1eb0449a0..7d3ee7fa50d 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -99,6 +99,18 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) } EXPORT_SYMBOL_GPL(blk_queue_lld_busy); +/** + * blk_urgent_request() - Set an urgent_request handler function for queue + * @q: queue + * @fn: handler for urgent requests + * + */ +void blk_urgent_request(struct request_queue *q, request_fn_proc *fn) +{ + q->urgent_request_fn = fn; +} +EXPORT_SYMBOL(blk_urgent_request); + /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 45c56d86b82..cb0f1a093e1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -200,6 +200,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ + if (ret < 0) \ + return ret; \ if (neg) \ val = !val; \ \ diff --git a/block/blk.h b/block/blk.h index d6586287adc..2285f51c9ab 100644 --- a/block/blk.h +++ b/block/blk.h @@ -28,6 +28,7 @@ void __generic_unplug_device(struct request_queue *); */ enum rq_atomic_flags { REQ_ATOM_COMPLETE = 0, + REQ_ATOM_URGENT = 1, }; /* @@ -44,6 +45,16 @@ static inline void blk_clear_rq_complete(struct request *rq) clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); } +static inline int blk_mark_rq_urgent(struct request *rq) +{ + return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags); +} + +static inline void blk_clear_rq_urgent(struct request *rq) +{ + clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags); +} + /* * Internal elevator interface */ diff --git a/block/bsg.c b/block/bsg.c index 0c8b64a1648..792ead66675 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -985,7 +985,8 @@ void bsg_unregister_queue(struct request_queue *q) mutex_lock(&bsg_mutex); idr_remove(&bsg_minor_idr, bcd->minor); - sysfs_remove_link(&q->kobj, "bsg"); + if (q->kobj.sd) + sysfs_remove_link(&q->kobj, "bsg"); device_unregister(bcd->class_dev); bcd->class_dev = 
NULL; kref_put(&bcd->ref, bsg_kref_release_function); diff --git a/block/elevator.c b/block/elevator.c index b0b38ce0dcb..b328d8ab5ea 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -606,6 +606,41 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE); } +/** + * elv_reinsert_request() - Insert a request back to the scheduler + * @q: request queue where request should be inserted + * @rq: request to be inserted + * + * This function returns the request back to the scheduler to be + * inserted as if it was never dispatched + * + * Return: 0 on success, error code on failure + */ +int elv_reinsert_request(struct request_queue *q, struct request *rq) +{ + int res; + + if (!q->elevator->elevator_type->ops.elevator_reinsert_req_fn) + return -EPERM; + + res = q->elevator->elevator_type->ops.elevator_reinsert_req_fn(q, rq); + if (!res) { + /* + * it already went through dequeue, we need to decrement the + * in_flight count again + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]--; + if (rq->cmd_flags & REQ_SORTED) + elv_deactivate_rq(q, rq); + } + rq->cmd_flags &= ~REQ_STARTED; + q->nr_sorted++; + } + + return res; +} + void elv_drain_elevator(struct request_queue *q) { static int printed; @@ -810,6 +845,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq) { struct elevator_queue *e = q->elevator; + if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) { + q->notified_urgent = false; + q->dispatched_urgent = false; + blk_clear_rq_urgent(rq); + } /* * request is released from the driver, io must be done */ diff --git a/block/genhd.c b/block/genhd.c index 3675601b7e7..9981aac3ca2 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -36,6 +36,7 @@ static DEFINE_IDR(ext_devt_idr); static struct device_type disk_type; +static void disk_alloc_events(struct gendisk *disk); static void disk_add_events(struct gendisk *disk); static void disk_del_events(struct gendisk *disk); static void disk_release_events(struct gendisk *disk); @@ -602,6 +603,8 @@ void add_disk(struct gendisk *disk) disk->major = MAJOR(devt); disk->first_minor = MINOR(devt); + disk_alloc_events(disk); + /* Register BDI before referencing it from bdev */ bdi = &disk->queue->backing_dev_info; bdi_register_dev(bdi, disk_devt(disk)); @@ -787,7 +790,7 @@ void __init printk_all_partitions(void) struct hd_struct *part; char name_buf[BDEVNAME_SIZE]; char devt_buf[BDEVT_SIZE]; - u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1]; + char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5]; /* * Don't show empty devices or things that have been @@ -806,14 +809,16 @@ void __init printk_all_partitions(void) while ((part = disk_part_iter_next(&piter))) { bool is_part0 = part == &disk->part0; - uuid[0] = 0; + uuid_buf[0] = '\0'; if (part->info) - part_unpack_uuid(part->info->uuid, uuid); + snprintf(uuid_buf, sizeof(uuid_buf), "%pU", + part->info->uuid); printk("%s%s %10llu %s %s", is_part0 ? 
"" : " ", bdevt_str(part_devt(part), devt_buf), (unsigned long long)part->nr_sects >> 1, - disk_name(disk, part->partno, name_buf), uuid); + disk_name(disk, part->partno, name_buf), + uuid_buf); if (is_part0) { if (disk->driverfs_dev != NULL && disk->driverfs_dev->driver != NULL) @@ -1547,9 +1552,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now) intv = disk_events_poll_jiffies(disk); set_timer_slack(&ev->dwork.timer, intv / 4); if (check_now) - queue_delayed_work(system_nrt_wq, &ev->dwork, 0); + queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); else if (intv) - queue_delayed_work(system_nrt_wq, &ev->dwork, intv); + queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); out_unlock: spin_unlock_irqrestore(&ev->lock, flags); } @@ -1590,7 +1595,7 @@ void disk_check_events(struct gendisk *disk) spin_lock_irqsave(&ev->lock, flags); if (!ev->block) { cancel_delayed_work(&ev->dwork); - queue_delayed_work(system_nrt_wq, &ev->dwork, 0); + queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); } spin_unlock_irqrestore(&ev->lock, flags); } @@ -1628,7 +1633,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) /* uncondtionally schedule event check and wait for it to finish */ disk_block_events(disk); - queue_delayed_work(system_nrt_wq, &ev->dwork, 0); + queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0); flush_delayed_work(&ev->dwork); __disk_unblock_events(disk, false); @@ -1665,7 +1670,7 @@ static void disk_events_workfn(struct work_struct *work) intv = disk_events_poll_jiffies(disk); if (!ev->block && intv) - queue_delayed_work(system_nrt_wq, &ev->dwork, intv); + queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv); spin_unlock_irq(&ev->lock); @@ -1803,9 +1808,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops, &disk_events_dfl_poll_msecs, 0644); /* - * disk_{add|del|release}_events - initialize and destroy disk_events. + * disk_{alloc|add|del|release}_events - initialize and destroy disk_events. */ -static void disk_add_events(struct gendisk *disk) +static void disk_alloc_events(struct gendisk *disk) { struct disk_events *ev; @@ -1818,16 +1823,6 @@ static void disk_add_events(struct gendisk *disk) return; } - if (sysfs_create_files(&disk_to_dev(disk)->kobj, - disk_events_attrs) < 0) { - pr_warn("%s: failed to create sysfs files for events\n", - disk->disk_name); - kfree(ev); - return; - } - - disk->ev = ev; - INIT_LIST_HEAD(&ev->node); ev->disk = disk; spin_lock_init(&ev->lock); @@ -1836,8 +1831,21 @@ static void disk_add_events(struct gendisk *disk) ev->poll_msecs = -1; INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); + disk->ev = ev; +} + +static void disk_add_events(struct gendisk *disk) +{ + if (!disk->ev) + return; + + /* FIXME: error handling */ + if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0) + pr_warn("%s: failed to create sysfs files for events\n", + disk->disk_name); + mutex_lock(&disk_events_mutex); - list_add_tail(&ev->node, &disk_events); + list_add_tail(&disk->ev->node, &disk_events); mutex_unlock(&disk_events_mutex); /* diff --git a/block/row-iosched.c b/block/row-iosched.c new file mode 100644 index 00000000000..a67ec325af0 --- /dev/null +++ b/block/row-iosched.c @@ -0,0 +1,790 @@ +/* + * ROW (Read Over Write) I/O scheduler. + * + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* See Documentation/block/row-iosched.txt */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * enum row_queue_prio - Priorities of the ROW queues + * + * This enum defines the priorities (and the number of queues) + * the requests will be disptributed to. The higher priority - + * the bigger is the dispatch quantum given to that queue. + * ROWQ_PRIO_HIGH_READ - is the higher priority queue. + * + */ +enum row_queue_prio { + ROWQ_PRIO_HIGH_READ = 0, + ROWQ_PRIO_REG_READ, + ROWQ_PRIO_HIGH_SWRITE, + ROWQ_PRIO_REG_SWRITE, + ROWQ_PRIO_REG_WRITE, + ROWQ_PRIO_LOW_READ, + ROWQ_PRIO_LOW_SWRITE, + ROWQ_MAX_PRIO, +}; + +/** + * struct row_queue_params - ROW queue parameters + * @idling_enabled: Flag indicating whether idling is enable on + * the queue + * @quantum: Number of requests to be dispatched from this queue + * in a dispatch cycle + * @is_urgent: Flags indicating whether the queue can notify on + * urgent requests + * + */ +struct row_queue_params { + bool idling_enabled; + int quantum; + bool is_urgent; +}; + +/* + * This array holds the default values of the different configurables + * for each ROW queue. Each row of the array holds the following values: + * {idling_enabled, quantum, is_urgent} + * Each row corresponds to a queue with the same index (according to + * enum row_queue_prio) + */ +static const struct row_queue_params row_queues_def[] = { +/* idling_enabled, quantum, is_urgent */ + {true, 100, true}, /* ROWQ_PRIO_HIGH_READ */ + {true, 100, true}, /* ROWQ_PRIO_REG_READ */ + {false, 2, false}, /* ROWQ_PRIO_HIGH_SWRITE */ + {false, 1, false}, /* ROWQ_PRIO_REG_SWRITE */ + {false, 1, false}, /* ROWQ_PRIO_REG_WRITE */ + {false, 1, false}, /* ROWQ_PRIO_LOW_READ */ + {false, 1, false} /* ROWQ_PRIO_LOW_SWRITE */ +}; + +/* Default values for idling on read queues (in msec) */ +#define ROW_IDLE_TIME_MSEC 5 +#define ROW_READ_FREQ_MSEC 20 + +/** + * struct rowq_idling_data - parameters for idling on the queue + * @last_insert_time: time the last request was inserted + * to the queue + * @begin_idling: flag indicating wether we should idle + * + */ +struct rowq_idling_data { + ktime_t last_insert_time; + bool begin_idling; +}; + +/** + * struct row_queue - requests grouping structure + * @rdata: parent row_data structure + * @fifo: fifo of requests + * @prio: queue priority (enum row_queue_prio) + * @nr_dispatched: number of requests already dispatched in + * the current dispatch cycle + * @slice: number of requests to dispatch in a cycle + * @nr_req: number of requests in queue + * @dispatch quantum: number of requests this queue may + * dispatch in a dispatch cycle + * @idle_data: data for idling on queues + * + */ +struct row_queue { + struct row_data *rdata; + struct list_head fifo; + enum row_queue_prio prio; + + unsigned int nr_dispatched; + unsigned int slice; + + unsigned int nr_req; + int disp_quantum; + + /* used only for READ queues */ + struct rowq_idling_data idle_data; +}; + +/** + * struct idling_data - data for idling on empty rqueue + * 
@idle_time: idling duration (jiffies) + * @freq: min time between two requests that + * triger idling (msec) + * @idle_work: pointer to struct delayed_work + * + */ +struct idling_data { + unsigned long idle_time; + u32 freq; + + struct workqueue_struct *idle_workqueue; + struct delayed_work idle_work; +}; + +/** + * struct row_queue - Per block device rqueue structure + * @dispatch_queue: dispatch rqueue + * @row_queues: array of priority request queues + * @curr_queue: index in the row_queues array of the + * currently serviced rqueue + * @read_idle: data for idling after READ request + * @nr_reqs: nr_reqs[0] holds the number of all READ requests in + * scheduler, nr_reqs[1] holds the number of all WRITE + * requests in scheduler + * @cycle_flags: used for marking unserved queueus + * + */ +struct row_data { + struct request_queue *dispatch_queue; + + struct row_queue row_queues[ROWQ_MAX_PRIO]; + + enum row_queue_prio curr_queue; + + struct idling_data read_idle; + unsigned int nr_reqs[2]; + + unsigned int cycle_flags; +}; + +#define RQ_ROWQ(rq) ((struct row_queue *) ((rq)->elevator_private[0])) + +#define row_log(q, fmt, args...) \ + blk_add_trace_msg(q, "%s():" fmt , __func__, ##args) +#define row_log_rowq(rdata, rowq_id, fmt, args...) \ + blk_add_trace_msg(rdata->dispatch_queue, "rowq%d " fmt, \ + rowq_id, ##args) + +static inline void row_mark_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + rd->cycle_flags |= (1 << qnum); +} + +static inline void row_clear_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + rd->cycle_flags &= ~(1 << qnum); +} + +static inline int row_rowq_unserved(struct row_data *rd, + enum row_queue_prio qnum) +{ + return rd->cycle_flags & (1 << qnum); +} + +static inline void __maybe_unused row_dump_queues_stat(struct row_data *rd) +{ + int i; + + row_log(rd->dispatch_queue, " Queues status:"); + for (i = 0; i < ROWQ_MAX_PRIO; i++) + row_log(rd->dispatch_queue, + "queue%d: dispatched= %d, nr_req=%d", i, + rd->row_queues[i].nr_dispatched, + rd->row_queues[i].nr_req); +} + +/******************** Static helper functions ***********************/ +/* + * kick_queue() - Wake up device driver queue thread + * @work: pointer to struct work_struct + * + * This is a idling delayed work function. It's purpose is to wake up the + * device driver in order for it to start fetching requests. 
+ * + */ +static void kick_queue(struct work_struct *work) +{ + struct delayed_work *idle_work = to_delayed_work(work); + struct idling_data *read_data = + container_of(idle_work, struct idling_data, idle_work); + struct row_data *rd = + container_of(read_data, struct row_data, read_idle); + + row_log_rowq(rd, rd->curr_queue, "Performing delayed work"); + /* Mark idling process as done */ + rd->row_queues[rd->curr_queue].idle_data.begin_idling = false; + + if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) + row_log(rd->dispatch_queue, "No requests in scheduler"); + else { + spin_lock_irq(rd->dispatch_queue->queue_lock); + __blk_run_queue(rd->dispatch_queue); + spin_unlock_irq(rd->dispatch_queue->queue_lock); + } +} + +/* + * row_restart_disp_cycle() - Restart the dispatch cycle + * @rd: pointer to struct row_data + * + * This function restarts the dispatch cycle by: + * - Setting current queue to ROWQ_PRIO_HIGH_READ + * - For each queue: reset the number of requests dispatched in + * the cycle + */ +static inline void row_restart_disp_cycle(struct row_data *rd) +{ + int i; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) + rd->row_queues[i].nr_dispatched = 0; + + rd->curr_queue = ROWQ_PRIO_HIGH_READ; + row_log(rd->dispatch_queue, "Restarting cycle"); +} + +static inline void row_get_next_queue(struct row_data *rd) +{ + rd->curr_queue++; + if (rd->curr_queue == ROWQ_MAX_PRIO) + row_restart_disp_cycle(rd); +} + +/******************* Elevator callback functions *********************/ + +/* + * row_add_request() - Add request to the scheduler + * @q: requests queue + * @rq: request to add + * + */ +static void row_add_request(struct request_queue *q, + struct request *rq) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + struct row_queue *rqueue = RQ_ROWQ(rq); + + list_add_tail(&rq->queuelist, &rqueue->fifo); + rd->nr_reqs[rq_data_dir(rq)]++; + rqueue->nr_req++; + rq_set_fifo_time(rq, jiffies); /* for statistics*/ + + if (row_queues_def[rqueue->prio].idling_enabled) { + if (delayed_work_pending(&rd->read_idle.idle_work)) + (void)cancel_delayed_work( + &rd->read_idle.idle_work); + if (ktime_to_ms(ktime_sub(ktime_get(), + rqueue->idle_data.last_insert_time)) < + rd->read_idle.freq) { + rqueue->idle_data.begin_idling = true; + row_log_rowq(rd, rqueue->prio, "Enable idling"); + } else { + rqueue->idle_data.begin_idling = false; + row_log_rowq(rd, rqueue->prio, "Disable idling"); + } + + rqueue->idle_data.last_insert_time = ktime_get(); + } + if (row_queues_def[rqueue->prio].is_urgent && + row_rowq_unserved(rd, rqueue->prio)) { + row_log_rowq(rd, rqueue->prio, + "added urgent request (total on queue=%d)", + rqueue->nr_req); + } else + row_log_rowq(rd, rqueue->prio, + "added request (total on queue=%d)", rqueue->nr_req); +} + +/** + * row_reinsert_req() - Reinsert request back to the scheduler + * @q: requests queue + * @rq: request to add + * + * Reinsert the given request back to the queue it was + * dispatched from as if it was never dispatched. 
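row_add_request() above arms idling only when reads on a queue arrive closer together than read_idle.freq, so the dispatcher will later wait idle_time for the next read instead of falling through to writes. The decision reduces to the comparison below; the names are illustrative, and the 20 ms / 5 ms defaults come from the ROW_*_MSEC constants above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Idle after this READ insertion only if reads are arriving faster than
 * 'freq_ms' apart, i.e. another one is likely to show up soon. */
static bool should_idle(uint64_t now_ms, uint64_t last_insert_ms, uint64_t freq_ms)
{
	return (now_ms - last_insert_ms) < freq_ms;
}

int main(void)
{
	/* With the ROW defaults (freq 20 ms, idle window 5 ms): */
	printf("gap 12 ms -> idle=%d\n", should_idle(112, 100, 20));	/* 1 */
	printf("gap 60 ms -> idle=%d\n", should_idle(160, 100, 20));	/* 0 */
	return 0;
}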
+ * + * Returns 0 on success, error code otherwise + */ +static int row_reinsert_req(struct request_queue *q, + struct request *rq) +{ + struct row_data *rd = q->elevator->elevator_data; + struct row_queue *rqueue = RQ_ROWQ(rq); + + /* Verify rqueue is legitimate */ + if (rqueue->prio >= ROWQ_MAX_PRIO) { + pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n", + rqueue->prio); + blk_dump_rq_flags(rq, ""); + return -EIO; + } + + list_add(&rq->queuelist, &rqueue->fifo); + rd->nr_reqs[rq_data_dir(rq)]++; + rqueue->nr_req++; + + row_log_rowq(rd, rqueue->prio, + "request reinserted (total on queue=%d)", rqueue->nr_req); + + return 0; +} + +/** + * row_urgent_pending() - Return TRUE if there is an urgent + * request on scheduler + * @q: requests queue + */ +static bool row_urgent_pending(struct request_queue *q) +{ + struct row_data *rd = q->elevator->elevator_data; + int i; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) + if (row_queues_def[i].is_urgent && row_rowq_unserved(rd, i) && + !list_empty(&rd->row_queues[i].fifo)) { + row_log_rowq(rd, i, + "Urgent request pending (curr=%i)", + rd->curr_queue); + return true; + } + + return false; +} + +/** + * row_remove_request() - Remove given request from scheduler + * @q: requests queue + * @rq: request to remove + * + */ +static void row_remove_request(struct request_queue *q, + struct request *rq) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + struct row_queue *rqueue = RQ_ROWQ(rq); + + rq_fifo_clear(rq); + rqueue->nr_req--; + rd->nr_reqs[rq_data_dir(rq)]--; +} + +/* + * row_dispatch_insert() - move request to dispatch queue + * @rd: pointer to struct row_data + * + * This function moves the next request to dispatch from + * rd->curr_queue to the dispatch queue + * + */ +static void row_dispatch_insert(struct row_data *rd) +{ + struct request *rq; + + rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].fifo.next); + row_remove_request(rd->dispatch_queue, rq); + elv_dispatch_add_tail(rd->dispatch_queue, rq); + rd->row_queues[rd->curr_queue].nr_dispatched++; + row_clear_rowq_unserved(rd, rd->curr_queue); + row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d", + rd->row_queues[rd->curr_queue].nr_dispatched); +} + +/* + * row_choose_queue() - choose the next queue to dispatch from + * @rd: pointer to struct row_data + * + * Updates rd->curr_queue. Returns 1 if there are requests to + * dispatch, 0 if there are no requests in scheduler + * + */ +static int row_choose_queue(struct row_data *rd) +{ + int prev_curr_queue = rd->curr_queue; + + if (!(rd->nr_reqs[0] + rd->nr_reqs[1])) { + row_log(rd->dispatch_queue, "No more requests in scheduler"); + return 0; + } + + row_get_next_queue(rd); + + /* + * Loop over all queues to find the next queue that is not empty. + * Stop when you get back to curr_queue + */ + while (list_empty(&rd->row_queues[rd->curr_queue].fifo) + && rd->curr_queue != prev_curr_queue) { + /* Mark rqueue as unserved */ + row_mark_rowq_unserved(rd, rd->curr_queue); + row_get_next_queue(rd); + } + + return 1; +} + +/* + * row_dispatch_requests() - selects the next request to dispatch + * @q: requests queue + * @force: ignored + * + * Return 0 if no requests were moved to the dispatch queue. 
+ * 1 otherwise + * + */ +static int row_dispatch_requests(struct request_queue *q, int force) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + int ret = 0, currq, i; + + currq = rd->curr_queue; + + /* + * Find the first unserved queue (with higher priority then currq) + * that is not empty + */ + for (i = 0; i < currq; i++) { + if (row_rowq_unserved(rd, i) && + !list_empty(&rd->row_queues[i].fifo)) { + row_log_rowq(rd, currq, + " Preemting for unserved rowq%d. (nr_req=%u)", + i, rd->row_queues[currq].nr_req); + rd->curr_queue = i; + row_dispatch_insert(rd); + ret = 1; + goto done; + } + } + + if (rd->row_queues[currq].nr_dispatched >= + rd->row_queues[currq].disp_quantum) { + rd->row_queues[currq].nr_dispatched = 0; + row_log_rowq(rd, currq, "Expiring rqueue"); + ret = row_choose_queue(rd); + if (ret) + row_dispatch_insert(rd); + goto done; + } + + /* Dispatch from curr_queue */ + if (list_empty(&rd->row_queues[currq].fifo)) { + /* check idling */ + if (delayed_work_pending(&rd->read_idle.idle_work)) { + if (force) { + (void)cancel_delayed_work( + &rd->read_idle.idle_work); + row_log_rowq(rd, currq, + "Canceled delayed work - forced dispatch"); + } else { + row_log_rowq(rd, currq, + "Delayed work pending. Exiting"); + goto done; + } + } + + if (!force && row_queues_def[currq].idling_enabled && + rd->row_queues[currq].idle_data.begin_idling) { + if (!queue_delayed_work(rd->read_idle.idle_workqueue, + &rd->read_idle.idle_work, + rd->read_idle.idle_time)) { + row_log_rowq(rd, currq, + "Work already on queue!"); + pr_err("ROW_BUG: Work already on queue!"); + } else + row_log_rowq(rd, currq, + "Scheduled delayed work. exiting"); + goto done; + } else { + row_log_rowq(rd, currq, + "Currq empty. Choose next queue"); + ret = row_choose_queue(rd); + if (!ret) + goto done; + } + } + + ret = 1; + row_dispatch_insert(rd); + +done: + return ret; +} + +/* + * row_init_queue() - Init scheduler data structures + * @q: requests queue + * + * Return pointer to struct row_data to be saved in elevator for + * this dispatch queue + * + */ +static void *row_init_queue(struct request_queue *q) +{ + + struct row_data *rdata; + int i; + + rdata = kmalloc_node(sizeof(*rdata), + GFP_KERNEL | __GFP_ZERO, q->node); + if (!rdata) + return NULL; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) { + INIT_LIST_HEAD(&rdata->row_queues[i].fifo); + rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum; + rdata->row_queues[i].rdata = rdata; + rdata->row_queues[i].prio = i; + rdata->row_queues[i].idle_data.begin_idling = false; + rdata->row_queues[i].idle_data.last_insert_time = + ktime_set(0, 0); + } + + /* + * Currently idling is enabled only for READ queues. 
If we want to + * enable it for write queues also, note that idling frequency will + * be the same in both cases + */ + rdata->read_idle.idle_time = msecs_to_jiffies(ROW_IDLE_TIME_MSEC); + /* Maybe 0 on some platforms */ + if (!rdata->read_idle.idle_time) + rdata->read_idle.idle_time = 1; + rdata->read_idle.freq = ROW_READ_FREQ_MSEC; + rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (!rdata->read_idle.idle_workqueue) + panic("Failed to create idle workqueue\n"); + INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue); + + rdata->curr_queue = ROWQ_PRIO_HIGH_READ; + rdata->dispatch_queue = q; + + rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0; + + return rdata; +} + +/* + * row_exit_queue() - called on unloading the RAW scheduler + * @e: poiner to struct elevator_queue + * + */ +static void row_exit_queue(struct elevator_queue *e) +{ + struct row_data *rd = (struct row_data *)e->elevator_data; + int i; + + for (i = 0; i < ROWQ_MAX_PRIO; i++) + BUG_ON(!list_empty(&rd->row_queues[i].fifo)); + (void)cancel_delayed_work_sync(&rd->read_idle.idle_work); + BUG_ON(delayed_work_pending(&rd->read_idle.idle_work)); + destroy_workqueue(rd->read_idle.idle_workqueue); + kfree(rd); +} + +/* + * row_merged_requests() - Called when 2 requests are merged + * @q: requests queue + * @rq: request the two requests were merged into + * @next: request that was merged + */ +static void row_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct row_queue *rqueue = RQ_ROWQ(next); + + list_del_init(&next->queuelist); + rqueue->nr_req--; + + rqueue->rdata->nr_reqs[rq_data_dir(rq)]--; +} + +/* + * get_queue_type() - Get queue type for a given request + * + * This is a helping function which purpose is to determine what + * ROW queue the given request should be added to (and + * dispatched from leter on) + * + * TODO: Right now only 3 queues are used REG_READ, REG_WRITE + * and REG_SWRITE + */ +static enum row_queue_prio get_queue_type(struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + const bool is_sync = rq_is_sync(rq); + + if (data_dir == READ) + return ROWQ_PRIO_REG_READ; + else if (is_sync) + return ROWQ_PRIO_REG_SWRITE; + else + return ROWQ_PRIO_REG_WRITE; +} + +/* + * row_set_request() - Set ROW data structures associated with this request. 
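
The read-idling machinery set up in row_init_queue() and torn down in row_exit_queue() above follows the usual delayed-work lifecycle: allocate a dedicated workqueue, arm a delayed work item when idling starts, and on exit cancel the work synchronously before destroying the queue. A minimal sketch of that pattern (struct and function names are illustrative, not the driver's):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct idle_ctx {                       /* hypothetical container */
        struct workqueue_struct *wq;
        struct delayed_work work;
};

static void idle_fn(struct work_struct *w)
{
        /* kick the request queue here once the idle window expires */
}

static int idle_setup(struct idle_ctx *c)
{
        c->wq = alloc_workqueue("sketch_idle", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!c->wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&c->work, idle_fn);
        return 0;
}

static void idle_arm(struct idle_ctx *c, unsigned long delay_jiffies)
{
        /* returns false if the work was already pending, as checked above */
        queue_delayed_work(c->wq, &c->work, delay_jiffies);
}

static void idle_teardown(struct idle_ctx *c)
{
        cancel_delayed_work_sync(&c->work);     /* wait for an in-flight run */
        destroy_workqueue(c->wq);
}
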
+ * @q: requests queue + * @rq: pointer to the request + * @gfp_mask: ignored + * + */ +static int +row_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) +{ + struct row_data *rd = (struct row_data *)q->elevator->elevator_data; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + rq->elevator_private[0] = + (void *)(&rd->row_queues[get_queue_type(rq)]); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; +} + +/********** Helping sysfs functions/defenitions for ROW attributes ******/ +static ssize_t row_var_show(int var, char *page) +{ + return snprintf(page, 100, "%d\n", var); +} + +static ssize_t row_var_store(int *var, const char *page, size_t count) +{ + int err; + err = kstrtoul(page, 10, (unsigned long *)var); + + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct row_data *rowd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return row_var_show(__data, (page)); \ +} +SHOW_FUNCTION(row_hp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 0); +SHOW_FUNCTION(row_rp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, 0); +SHOW_FUNCTION(row_hp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_rp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_rp_write_quantum_show, + rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, 0); +SHOW_FUNCTION(row_lp_read_quantum_show, + rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, 0); +SHOW_FUNCTION(row_lp_swrite_quantum_show, + rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, 0); +SHOW_FUNCTION(row_read_idle_show, rowd->read_idle.idle_time, 0); +SHOW_FUNCTION(row_read_idle_freq_show, rowd->read_idle.freq, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, \ + const char *page, size_t count) \ +{ \ + struct row_data *rowd = e->elevator_data; \ + int __data; \ + int ret = row_var_store(&__data, (page), count); \ + if (__CONV) \ + __data = (int)msecs_to_jiffies(__data); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(row_hp_read_quantum_store, +&rowd->row_queues[ROWQ_PRIO_HIGH_READ].disp_quantum, 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_read_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_READ].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_hp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_HIGH_SWRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_SWRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_rp_write_quantum_store, + &rowd->row_queues[ROWQ_PRIO_REG_WRITE].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_lp_read_quantum_store, + &rowd->row_queues[ROWQ_PRIO_LOW_READ].disp_quantum, + 1, INT_MAX, 0); +STORE_FUNCTION(row_lp_swrite_quantum_store, + &rowd->row_queues[ROWQ_PRIO_LOW_SWRITE].disp_quantum, + 1, INT_MAX, 1); +STORE_FUNCTION(row_read_idle_store, &rowd->read_idle.idle_time, 1, INT_MAX, 0); +STORE_FUNCTION(row_read_idle_freq_store, &rowd->read_idle.freq, 1, INT_MAX, 0); + +#undef STORE_FUNCTION + +#define ROW_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, row_##name##_show, \ + row_##name##_store) + +static struct elv_fs_entry row_attrs[] = 
{ + ROW_ATTR(hp_read_quantum), + ROW_ATTR(rp_read_quantum), + ROW_ATTR(hp_swrite_quantum), + ROW_ATTR(rp_swrite_quantum), + ROW_ATTR(rp_write_quantum), + ROW_ATTR(lp_read_quantum), + ROW_ATTR(lp_swrite_quantum), + ROW_ATTR(read_idle), + ROW_ATTR(read_idle_freq), + __ATTR_NULL +}; + +static struct elevator_type iosched_row = { + .ops = { + .elevator_merge_req_fn = row_merged_requests, + .elevator_dispatch_fn = row_dispatch_requests, + .elevator_add_req_fn = row_add_request, + .elevator_reinsert_req_fn = row_reinsert_req, + .elevator_is_urgent_fn = row_urgent_pending, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_set_req_fn = row_set_request, + .elevator_init_fn = row_init_queue, + .elevator_exit_fn = row_exit_queue, + }, + + .elevator_attrs = row_attrs, + .elevator_name = "row", + .elevator_owner = THIS_MODULE, +}; + +static int __init row_init(void) +{ + elv_register(&iosched_row); + return 0; +} + +static void __exit row_exit(void) +{ + elv_unregister(&iosched_row); +} + +module_init(row_init); +module_exit(row_exit); + +MODULE_LICENSE("GPLv2"); +MODULE_DESCRIPTION("Read Over Write IO scheduler"); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4f4230b79bb..055952e92fb 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -691,6 +692,60 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod } EXPORT_SYMBOL(scsi_cmd_ioctl); +int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd) +{ + if (bd && bd == bd->bd_contains) + return 0; + + /* Actually none of these is particularly useful on a partition, + * but they are safe. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SCSI_IOCTL_GET_PCI: + case SCSI_IOCTL_PROBE_HOST: + case SG_GET_VERSION_NUM: + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_RESERVED_SIZE: + case SG_SET_RESERVED_SIZE: + case SG_EMULATED_HOST: + return 0; + case CDROM_GET_CAPABILITY: + /* Keep this until we remove the printk below. udev sends it + * and we do not want to spam dmesg about it. CD-ROMs do + * not have partitions, so we get here only for disks. + */ + return -ENOTTY; + default: + break; + } + + if (capable(CAP_SYS_RAWIO)) + return 0; + + /* In particular, rule out all resets and host-specific ioctls. 
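
scsi_verify_blk_ioctl() above is meant to sit at the top of a block driver's ioctl path, so that SG_IO and friends are refused on partitions unless the caller has CAP_SYS_RAWIO; the scsi_cmd_blk_ioctl() wrapper added just below bundles the check with the forward to scsi_cmd_ioctl(). A sketch of how a hypothetical driver's ioctl handler would use it (the handler name is made up; the declarations are assumed to come from linux/blkdev.h in this series):

#include <linux/blkdev.h>

/* Hypothetical block driver ioctl handler: verify first, then forward. */
static int sketch_blk_ioctl(struct block_device *bdev, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        int err = scsi_verify_blk_ioctl(bdev, cmd);

        if (err < 0)
                return err;

        return scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk, mode,
                              cmd, (void __user *)arg);
}
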
*/ + printk_ratelimited(KERN_WARNING + "%s: sending ioctl %x to a partition!\n", current->comm, cmd); + + return -ENOTTY; +} +EXPORT_SYMBOL(scsi_verify_blk_ioctl); + +int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode, + unsigned int cmd, void __user *arg) +{ + int ret; + + ret = scsi_verify_blk_ioctl(bd, cmd); + if (ret < 0) + return ret; + + return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg); +} +EXPORT_SYMBOL(scsi_cmd_blk_ioctl); + static int __init blk_scsi_ioctl_init(void) { blk_set_cmd_filter_defaults(&blk_default_cmd_filter); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 62122a1a2f7..fed2868e504 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -159,6 +159,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, else if (len < ds) msg->msg_flags |= MSG_TRUNC; + msg->msg_namelen = 0; + lock_sock(sk); if (ctx->more) { ctx->more = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 6a6dfc062d2..a1c4f0a5558 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, long copied = 0; lock_sock(sk); + msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 671d4d6d14d..7bdd61b867c 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work) struct crypto_async_request *req, *backlog; cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* Only handle one request at a time to avoid hogging crypto - * workqueue. preempt_disable/enable is used to prevent - * being preempted by cryptd_enqueue_request() */ + /* + * Only handle one request at a time to avoid hogging crypto workqueue. + * preempt_disable/enable is used to prevent being preempted by + * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent + * cryptd_enqueue_request() being accessed from software interrupts. + */ + local_bh_disable(); preempt_disable(); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); preempt_enable(); + local_bh_enable(); if (!req) return; diff --git a/crypto/gcm.c b/crypto/gcm.c index 1a252639ef9..b97b186d561 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx { struct crypto_rfc4543_req_ctx { u8 auth_tag[16]; + u8 assocbuf[32]; struct scatterlist cipher[1]; struct scatterlist payload[2]; struct scatterlist assoc[2]; @@ -1142,9 +1143,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 
0 : authsize); - sg_init_table(assoc, 2); - sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, - req->assoc->offset); + if (req->assoc->length == req->assoclen) { + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); + } else { + BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); + + scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, + req->assoclen, 0); + + sg_init_table(assoc, 2); + sg_set_buf(assoc, rctx->assocbuf, req->assoclen); + } scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 9ed9f60316e..dd30f40af9f 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -21,8 +21,6 @@ #include #include -static DEFINE_PER_CPU(u64[80], msg_schedule); - static inline u64 Ch(u64 x, u64 y, u64 z) { return z ^ (x & (y ^ z)); @@ -33,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z) return (x & y) | (z & (x | y)); } -static inline u64 RORu64(u64 x, u64 y) -{ - return (x >> y) | (x << (64 - y)); -} - static const u64 sha512_K[80] = { 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, @@ -68,10 +61,10 @@ static const u64 sha512_K[80] = { 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL, }; -#define e0(x) (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39)) -#define e1(x) (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41)) -#define s0(x) (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7)) -#define s1(x) (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6)) +#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39)) +#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41)) +#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7)) +#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6)) static inline void LOAD_OP(int I, u64 *W, const u8 *input) { @@ -80,7 +73,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input) static inline void BLEND_OP(int I, u64 *W) { - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; + W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]); } static void @@ -89,15 +82,7 @@ sha512_transform(u64 *state, const u8 *input) u64 a, b, c, d, e, f, g, h, t1, t2; int i; - u64 *W = get_cpu_var(msg_schedule); - - /* load the input */ - for (i = 0; i < 16; i++) - LOAD_OP(i, W, input); - - for (i = 16; i < 80; i++) { - BLEND_OP(i, W); - } + u64 W[16]; /* load the state into our registers */ a=state[0]; b=state[1]; c=state[2]; d=state[3]; @@ -105,21 +90,35 @@ sha512_transform(u64 *state, const u8 *input) /* now iterate */ for (i=0; i<80; i+=8) { - t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ]; + if (!(i & 8)) { + int j; + + if (i < 16) { + /* load the input */ + for (j = 0; j < 16; j++) + LOAD_OP(i + j, W, input); + } else { + for (j = 0; j < 16; j++) { + BLEND_OP(i + j, W); + } + } + } + + t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; - t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1]; + t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; - t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2]; + t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; - t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3]; + t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; - t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4]; + t1 = d + e1(a) + Ch(a,b,c) + 
sha512_K[i+4] + W[(i & 15) + 4]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; - t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5]; + t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; - t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6]; + t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; - t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7]; + t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; } @@ -128,8 +127,6 @@ sha512_transform(u64 *state, const u8 *input) /* erase our data */ a = b = c = d = e = f = g = h = t1 = t2 = 0; - memset(W, 0, sizeof(__get_cpu_var(msg_schedule))); - put_cpu_var(msg_schedule); } static int @@ -177,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) index = sctx->count[0] & 0x7f; /* Update number of bytes */ - if (!(sctx->count[0] += len)) + if ((sctx->count[0] += len) < len) sctx->count[1]++; part_len = 128 - index; diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 58c3f74bd84..958205046e0 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device) ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); ac->charger.get_property = get_ac_property; - power_supply_register(&ac->device->dev, &ac->charger); + result = power_supply_register(&ac->device->dev, &ac->charger); + if (result) + goto end; printk(KERN_INFO PREFIX "%s [%s] (%s)\n", acpi_device_name(device), acpi_device_bid(device), diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index a43fa1a57d5..1502c50273b 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -36,6 +36,7 @@ #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 static DEFINE_MUTEX(isolated_cpus_lock); +static DEFINE_MUTEX(round_robin_lock); static unsigned long power_saving_mwait_eax; @@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index) if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; - mutex_lock(&isolated_cpus_lock); + mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); @@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index) if (cpumask_empty(tmp)) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { - mutex_unlock(&isolated_cpus_lock); + mutex_unlock(&round_robin_lock); return; } for_each_cpu(cpu, tmp) { @@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index) tsk_in_cpu[tsk_index] = preferred_cpu; cpumask_set_cpu(preferred_cpu, pad_busy_cpus); cpu_weight[preferred_cpu]++; - mutex_unlock(&isolated_cpus_lock); + mutex_unlock(&round_robin_lock); set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); } diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 1055769f2f0..6d276c20b57 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -358,6 +358,7 @@ typedef enum { */ struct acpi_object_extra { ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */ + struct acpi_namespace_node *scope_node; void *region_context; /* Region-specific data */ u8 *aml_start; u32 aml_length; diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 8c7b99728aa..d69e4a53175 100644 --- 
a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -384,8 +384,32 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc) /* Execute the argument AML */ - status = acpi_ds_execute_arguments(node, node->parent, + status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node, extra_desc->extra.aml_length, extra_desc->extra.aml_start); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Validate the region address/length via the host OS */ + + status = acpi_os_validate_address(obj_desc->region.space_id, + obj_desc->region.address, + (acpi_size) obj_desc->region.length, + acpi_ut_get_node_name(node)); + + if (ACPI_FAILURE(status)) { + /* + * Invalid address/length. We will emit an error message and mark + * the region as invalid, so that it will cause an additional error if + * it is ever used. Then return AE_OK. + */ + ACPI_EXCEPTION((AE_INFO, status, + "During address validation of OpRegion [%4.4s]", + node->name.ascii)); + obj_desc->common.flags |= AOPOBJ_INVALID; + status = AE_OK; + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 110711afada..8a06dc523af 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -330,6 +330,12 @@ acpi_ex_create_region(u8 * aml_start, region_obj2 = obj_desc->common.next_object; region_obj2->extra.aml_start = aml_start; region_obj2->extra.aml_length = aml_length; + if (walk_state->scope_info) { + region_obj2->extra.scope_node = + walk_state->scope_info->scope.node; + } else { + region_obj2->extra.scope_node = node; + } /* Init the region from the operands */ diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index f915a7f3f92..b334f54bb72 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -702,7 +702,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { - status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); + if (buffer_length >= sizeof(u64)) { + status = + acpi_ex_field_datum_io(obj_desc, 0, buffer, + ACPI_READ); + } else { + /* Use raw_datum (u64) to handle buffers < 64 bits */ + + status = + acpi_ex_field_datum_io(obj_desc, 0, &raw_datum, + ACPI_READ); + ACPI_MEMCPY(buffer, &raw_datum, buffer_length); + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 6f5588e62c0..4c531b4a962 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -350,10 +350,6 @@ static void acpi_tb_convert_fadt(void) u32 address32; u32 i; - /* Update the local FADT table header length */ - - acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); - /* * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary. * Later code will always use the X 64-bit field. Also, check for an @@ -395,6 +391,10 @@ static void acpi_tb_convert_fadt(void) acpi_gbl_FADT.boot_flags = 0; } + /* Update the local FADT table header length */ + + acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt); + /* * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X" * generic address structures as necessary. 
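
Stepping back to the sha512_generic.c rework a few hunks above: it drops the per-CPU 80-word message schedule in favour of a 16-word window kept on the stack. The point is that when round t needs a fresh W[t], the slot holding W[t-16] is dead, so the schedule recurrence W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] can be evaluated in place with "W[t & 15] +=", exactly as the new BLEND_OP does. A standalone user-space sketch of that windowing (ror64 open-coded; valid for shift counts 1..63):

#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned int n)
{
        return (x >> n) | (x << (64 - n));
}

#define s0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7))
#define s1(x) (ror64((x), 19) ^ ror64((x), 61) ^ ((x) >> 6))

/*
 * Extend the SHA-512 message schedule in place for round t >= 16.
 * W[] holds only the 16 most recent words; the += folds in the old
 * W[t-16], which lives in the very slot being overwritten.
 */
static void blend(uint64_t W[16], int t)
{
        W[t & 15] += s1(W[(t - 2) & 15]) + W[(t - 7) & 15] +
                     s0(W[(t - 15) & 15]);
}
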
Later code will always use diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 4b7085dfc68..55edd4ae0a0 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -435,6 +435,7 @@ acpi_get_table_with_size(char *signature, return (AE_NOT_FOUND); } +ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) acpi_status acpi_get_table(char *signature, diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index fcc13ac0aa1..4a15d57b7bc 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI_PROCFS_POWER #include @@ -97,6 +98,18 @@ enum { */ ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, + /* On Lenovo Thinkpad models from 2010 and 2011, the power unit + switches between mWh and mAh depending on whether the system + is running on battery or not. When mAh is the unit, most + reported values are incorrect and need to be adjusted by + 10000/design_voltage. Verified on x201, t410, t410s, and x220. + Pre-2010 and 2012 models appear to always report in mWh and + are thus unaffected (tested with t42, t61, t500, x200, x300, + and x230). Also, in mid-2012 Lenovo issued a BIOS update for + the 2011 models that fixes the issue (tested on x220 with a + post-1.29 BIOS), but as of Nov. 2012, no such update is + available for the 2010 models. */ + ACPI_BATTERY_QUIRK_THINKPAD_MAH, }; struct acpi_battery { @@ -429,6 +442,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery) kfree(buffer.pointer); if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) battery->full_charge_capacity = battery->design_capacity; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->design_capacity = battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + /* Curiously, design_capacity_low, unlike the rest of them, + is correct. */ + /* capacity_granularity_* equal 1 on the systems tested, so + it's impossible to tell if they would need an adjustment + or not if their values were higher. */ + } return result; } @@ -469,6 +497,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery) && battery->capacity_now >= 0 && battery->capacity_now <= 100) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) && + battery->power_unit && battery->design_voltage) { + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } return result; } @@ -580,6 +613,24 @@ static void acpi_battery_quirks(struct acpi_battery *battery) } } +static void find_battery(const struct dmi_header *dm, void *private) +{ + struct acpi_battery *battery = (struct acpi_battery *)private; + /* Note: the hardcoded offsets below have been extracted from + the source code of dmidecode. 
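
To make the ThinkPad quirk above concrete: on an affected model the firmware labels the capacity fields as mAh while actually reporting mWh/10, so true mAh = mWh * 1000 / mV = reported * 10000 / design_voltage, which is the adjustment applied in acpi_battery_get_info(). The DMI cross-check that detects the situation is the comparison in find_battery() just below. A small user-space illustration with made-up (not measured) numbers:

#include <stdio.h>

int main(void)
{
        unsigned int reported = 5616;        /* ACPI "mAh" field, really mWh/10 */
        unsigned int design_voltage = 10800; /* mV */
        unsigned int dmi_mwh = 56160;        /* SMBIOS type 22 design capacity */

        /* same test as find_battery(): the mWh conversion disagrees with DMI,
         * but multiplying the reported value by 10 matches it exactly */
        int quirk = (reported * design_voltage / 1000 != dmi_mwh) &&
                    (reported * 10 == dmi_mwh);

        unsigned int mah = reported * 10000 / design_voltage;

        printf("quirk=%d adjusted=%u mAh\n", quirk, mah); /* quirk=1, 5200 */
        return 0;
}
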
*/ + if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) { + const u8 *dmi_data = (const u8 *)(dm + 1); + int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6)); + if (dm->length >= 18) + dmi_capacity *= dmi_data[17]; + if (battery->design_capacity * battery->design_voltage / 1000 + != dmi_capacity && + battery->design_capacity * 10 == dmi_capacity) + set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags); + } +} + /* * According to the ACPI spec, some kinds of primary batteries can * report percentage battery remaining capacity directly to OS. @@ -605,6 +656,32 @@ static void acpi_battery_quirks2(struct acpi_battery *battery) battery->capacity_now = (battery->capacity_now * battery->full_charge_capacity) / 100; } + + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags)) + return ; + + if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { + const char *s; + s = dmi_get_system_info(DMI_PRODUCT_VERSION); + if (s && !strnicmp(s, "ThinkPad", 8)) { + dmi_walk(find_battery, battery); + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, + &battery->flags) && + battery->design_voltage) { + battery->design_capacity = + battery->design_capacity * + 10000 / battery->design_voltage; + battery->full_charge_capacity = + battery->full_charge_capacity * + 10000 / battery->design_voltage; + battery->design_capacity_warning = + battery->design_capacity_warning * + 10000 / battery->design_voltage; + battery->capacity_now = battery->capacity_now * + 10000 / battery->design_voltage; + } + } + } } static int acpi_battery_update(struct acpi_battery *battery) @@ -635,11 +712,19 @@ static int acpi_battery_update(struct acpi_battery *battery) static void acpi_battery_refresh(struct acpi_battery *battery) { + int power_unit; + if (!battery->bat.dev) return; + power_unit = battery->power_unit; + acpi_battery_get_info(battery); - /* The battery may have changed its reporting units. */ + + if (power_unit == battery->power_unit) + return; + + /* The battery has changed its reporting units. */ sysfs_remove_battery(battery); sysfs_add_battery(battery); } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d1e06c182cd..1c57307c310 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -944,14 +944,18 @@ static int __init acpi_bus_init(void) status = acpi_ec_ecdt_probe(); /* Ignore result. Not having an ECDT is not fatal. */ - acpi_bus_osc_support(); - status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); goto error1; } + /* + * _OSC method may exist in module level code, + * so it must be run after ACPI_FULL_INITIALIZATION + */ + acpi_bus_osc_support(); + /* * _PDC control method may load dynamic SSDT tables, * and we need to install the table handler before that. diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index b19a18dd994..51de186a8bf 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -71,9 +71,6 @@ enum ec_command { #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. 
to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ -#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts - per one transaction */ - enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ EC_FLAGS_GPE_STORM, /* GPE storm detected */ @@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); +/* + * If the number of false interrupts per one transaction exceeds + * this threshold, will think there is a GPE storm happened and + * will disable the GPE for normal transaction. + */ +static unsigned int ec_storm_threshold __read_mostly = 8; +module_param(ec_storm_threshold, uint, 0644); +MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); + /* If we find an EC via the ECDT, we need to keep a ptr to its context */ /* External interfaces use first EC only, so remember */ typedef int (*acpi_ec_query_func) (void *data); @@ -211,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec *ec) { unsigned long flags; - int repeat = 2; /* number of command restarts */ + int repeat = 5; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); @@ -229,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) - break; pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->curr_lock, flags); start_transaction(ec); @@ -319,7 +323,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) msleep(1); /* It is safe to enable the GPE outside of the transaction. */ acpi_enable_gpe(NULL, ec->gpe); - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { + } else if (t->irq_count > ec_storm_threshold) { pr_info(PREFIX "GPE storm detected, " "transactions will use polling mode\n"); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); @@ -914,6 +918,17 @@ static int ec_flag_msi(const struct dmi_system_id *id) return 0; } +/* + * Clevo M720 notebook actually works ok with IRQ mode, if we lifted + * the GPE storm threshold back to 20 + */ +static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) +{ + pr_debug("Setting the EC GPE storm threshold to 20\n"); + ec_storm_threshold = 20; + return 0; +} + static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -945,10 +960,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, + { + ec_enlarge_storm_threshold, "CLEVO hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, {}, }; - int __init acpi_ec_ecdt_probe(void) { acpi_status status; diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 3b5c3189fd9..e56f3be7b07 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS] static int node_to_pxm_map[MAX_NUMNODES] = { [0 ... 
MAX_NUMNODES - 1] = PXM_INVAL }; +unsigned char acpi_srat_revision __initdata; + int pxm_to_node(int pxm) { if (pxm < 0) @@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, static int __init acpi_parse_srat(struct acpi_table_header *table) { + struct acpi_table_srat *srat; if (!table) return -EINVAL; + srat = (struct acpi_table_srat *)table; + acpi_srat_revision = srat->header.revision; + /* Real work done in acpi_table_parse_srat below. */ return 0; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d06078d660a..ea89d85f36e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -247,8 +247,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, *control &= OSC_PCI_CONTROL_MASKS; capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; } else { - /* Run _OSC query for all possible controls. */ - capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; + /* Run _OSC query only with existing controls. */ + capbuf[OSC_CONTROL_TYPE] = root->osc_control_set; } status = acpi_pci_run_osc(root->device->handle, capbuf, &result); @@ -595,6 +595,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) if (ACPI_SUCCESS(status)) { dev_info(root->bus->bridge, "ACPI _OSC control (0x%02x) granted\n", flags); + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { + /* + * We have ASPM control, but the FADT indicates + * that it's unsupported. Clear it. + */ + pcie_clear_aspm(root->bus); + } } else { dev_info(root->bus->bridge, "ACPI _OSC request failed (%s), " diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 02d2a4c9084..18935063138 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -172,8 +172,32 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) apic_id = map_mat_entry(handle, type, acpi_id); if (apic_id == -1) apic_id = map_madt_entry(type, acpi_id); - if (apic_id == -1) - return apic_id; + if (apic_id == -1) { + /* + * On UP processor, there is no _MAT or MADT table. + * So above apic_id is always set to -1. + * + * BIOS may define multiple CPU handles even for UP processor. + * For example, + * + * Scope (_PR) + * { + * Processor (CPU0, 0x00, 0x00000410, 0x06) {} + * Processor (CPU1, 0x01, 0x00000410, 0x06) {} + * Processor (CPU2, 0x02, 0x00000410, 0x06) {} + * Processor (CPU3, 0x03, 0x00000410, 0x06) {} + * } + * + * Ignores apic_id and always returns 0 for the processor + * handle with acpi id 0 if nr_cpu_ids is 1. + * This should be the case if SMP tables are not found. + * Return -1 for other CPU's handle. 
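
The Clevo entry added to ec_dmi_table above (and the Sony/Asus/HP entries in the sleep.c and video.c hunks further down) all follow the same DMI quirk idiom: a table of dmi_system_id matches whose callback flips a module-level knob, run once at init via dmi_check_system(). A generic sketch of that idiom, with invented vendor strings and a made-up tunable:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/kernel.h>

static unsigned int sketch_threshold = 8;       /* default tunable */

static int sketch_raise_threshold(const struct dmi_system_id *id)
{
        pr_debug("raising threshold for %s\n", id->ident);
        sketch_threshold = 20;
        return 0;
}

static struct dmi_system_id sketch_quirks[] __initdata = {
        {
                .callback = sketch_raise_threshold,
                .ident = "Example vendor hardware",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
                },
        },
        {}
};

/* in the driver's init path: dmi_check_system(sketch_quirks); */
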
+ */ + if (nr_cpu_ids <= 1 && acpi_id == 0) + return acpi_id; + else + return apic_id; + } #ifdef CONFIG_SMP for_each_possible_cpu(i) { diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index a4e0f1ba604..6da4f071559 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event) acpi_bus_generate_proc_event(device, event, 0); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); + break; default: ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported event [0x%x]\n", event)); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 431ab11c8c1..65976cb77d9 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -991,6 +991,9 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) return -EINVAL; } + if (!dev) + return -EINVAL; + dev->cpu = pr->id; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { dev->states[i].name[0] = '\0'; diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 79cb6533289..3854df25ac9 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -58,6 +58,27 @@ ACPI_MODULE_NAME("processor_thermal"); static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); static unsigned int acpi_thermal_cpufreq_is_init = 0; +#define reduction_pctg(cpu) \ + per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu)) + +/* + * Emulate "per package data" using per cpu data (which should really be + * provided elsewhere) + * + * Note we can lose a CPU on cpu hotunplug, in this case we forget the state + * temporarily. Fortunately that's not a big issue here (I hope) + */ +static int phys_package_first_cpu(int cpu) +{ + int i; + int id = topology_physical_package_id(cpu); + + for_each_online_cpu(i) + if (topology_physical_package_id(i) == id) + return i; + return 0; +} + static int cpu_has_cpufreq(unsigned int cpu) { struct cpufreq_policy policy; @@ -77,7 +98,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, max_freq = ( policy->cpuinfo.max_freq * - (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20) + (100 - reduction_pctg(policy->cpu) * 20) ) / 100; cpufreq_verify_within_limits(policy, 0, max_freq); @@ -103,16 +124,28 @@ static int cpufreq_get_cur_state(unsigned int cpu) if (!cpu_has_cpufreq(cpu)) return 0; - return per_cpu(cpufreq_thermal_reduction_pctg, cpu); + return reduction_pctg(cpu); } static int cpufreq_set_cur_state(unsigned int cpu, int state) { + int i; + if (!cpu_has_cpufreq(cpu)) return 0; - per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state; - cpufreq_update_policy(cpu); + reduction_pctg(cpu) = state; + + /* + * Update all the CPUs in the same package because they all + * contribute to the temperature and often share the same + * frequency. 
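
For the processor_thermal changes above, each passive-cooling state removes 20% of the package's maximum frequency: the cpufreq notifier computes max_freq = cpuinfo.max_freq * (100 - reduction * 20) / 100, with the reduction now shared per physical package via reduction_pctg(). A tiny user-space illustration of that arithmetic (the 2.4 GHz base value is made up):

#include <stdio.h>

int main(void)
{
        unsigned int max_khz = 2400000; /* illustrative cpuinfo.max_freq */
        unsigned int state;

        for (state = 0; state <= 4; state++)
                printf("state %u -> limit %u kHz\n",
                       state, max_khz * (100 - state * 20) / 100);
        /* state 0 -> 2400000, 1 -> 1920000, 2 -> 1440000,
         * 3 -> 960000, 4 -> 480000 */
        return 0;
}
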
+ */ + for_each_online_cpu(i) { + if (topology_physical_package_id(i) == + topology_physical_package_id(cpu)) + cpufreq_update_policy(i); + } return 0; } @@ -120,10 +153,6 @@ void acpi_thermal_cpufreq_init(void) { int i; - for (i = 0; i < nr_cpu_ids; i++) - if (cpu_present(i)) - per_cpu(cpufreq_thermal_reduction_pctg, i) = 0; - i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); if (!i) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 449c556274c..ea1fe0a2d37 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -789,8 +789,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle, static void acpi_bus_set_run_wake_flags(struct acpi_device *device) { struct acpi_device_id button_device_ids[] = { - {"PNP0C0D", 0}, {"PNP0C0C", 0}, + {"PNP0C0D", 0}, {"PNP0C0E", 0}, {"", 0}, }; @@ -802,6 +802,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device) /* Power button, Lid switch always enable wakeup */ if (!acpi_match_device_ids(device, button_device_ids)) { device->wakeup.flags.run_wake = 1; + if (!acpi_match_device_ids(device, &button_device_ids[1])) { + /* Do not use Lid/sleep button for S5 wakeup */ + if (device->wakeup.sleep_state == ACPI_STATE_S5) + device->wakeup.sleep_state = ACPI_STATE_S4; + } device_set_wakeup_capable(&device->dev, true); return; } @@ -1153,7 +1158,7 @@ static void acpi_device_set_id(struct acpi_device *device) acpi_add_id(device, ACPI_DOCK_HID); else if (!acpi_ibm_smbus_match(device)) acpi_add_id(device, ACPI_SMBUS_IBM_HID); - else if (!acpi_device_hid(device) && + else if (list_empty(&device->pnp.ids) && ACPI_IS_ROOT_DEVICE(device->parent)) { acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6c949602cbd..79ddcdee83a 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -422,12 +422,36 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, { .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCCW29FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), + }, + }, + { + .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), }, }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54C", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Asus K54HR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 77255f250db..0364b05fb7a 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) { int result = 0; - if (!strncmp(val, "enable", strlen("enable") - 1)) { + if (!strncmp(val, "enable", strlen("enable"))) { result = acpi_debug_trace(trace_method_name, trace_debug_level, trace_debug_layer, 0); if (result) @@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) goto exit; } - if (!strncmp(val, "disable", strlen("disable") - 1)) { + if (!strncmp(val, "disable", strlen("disable"))) { int name = 0; result = acpi_debug_trace((char *)&name, trace_debug_level, trace_debug_layer, 
0); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index db39e9e607d..76f0b9432d8 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -390,6 +390,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d) return 0; } +static int video_ignore_initial_backlight(const struct dmi_system_id *d) +{ + use_bios_initial_backlight = 0; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -434,6 +440,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Folio 13-2000", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), + }, + }, {} }; @@ -1732,6 +1746,7 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) static int __init intel_opregion_present(void) { + int i915 = 0; #if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) struct pci_dev *dev = NULL; u32 address; @@ -1744,10 +1759,10 @@ static int __init intel_opregion_present(void) pci_read_config_dword(dev, 0xfc, &address); if (!address) continue; - return 1; + i915 = 1; } #endif - return 0; + return i915; } int acpi_video_register(void) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1e9ab9bf854..75a8d0f2499 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -392,12 +392,22 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ { PCI_DEVICE(0x1b4b, 0x9125), .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ + { PCI_DEVICE(0x1b4b, 0x917a), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(0x1b4b, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + /* Asmedia */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 000d03ae665..aa5f055dc78 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1599,6 +1599,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + + /* some SATA bridges need us to indicate data xfer direction */ + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && + dma_dir == DMA_FROM_DEVICE) + qc->tf.feature |= ATAPI_DMADIR; + qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { @@ -2412,6 +2418,9 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -2543,6 +2552,7 @@ int ata_bus_probe(struct ata_port *ap) * bus as we may be talking too fast. 
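
Back in the drivers/acpi/sysfs.c hunk above: the old param_set_trace_state() compared one byte too few (strlen("enable") - 1), so any value merely beginning with "enabl" or "disabl" was accepted as the keyword; the fix compares the full string. A short user-space demonstration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *val = "enablx";     /* not a valid keyword */

        /* old check: only "enabl" is compared, so this matches (prints 1) */
        printf("old: %d\n", !strncmp(val, "enable", strlen("enable") - 1));
        /* fixed check: all six characters are compared (prints 0) */
        printf("new: %d\n", !strncmp(val, "enable", strlen("enable")));
        return 0;
}
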
*/ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't @@ -4076,6 +4086,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, + { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ @@ -4138,6 +4149,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, + { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, /* * Devices which choke on SETXFER. Applies only if both the diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 7f099d6e4e0..1cbb0043638 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2602,6 +2602,7 @@ int ata_eh_reset(struct ata_link *link, int classify, * bus as we may be talking too fast. */ dev->pio_mode = XFER_PIO_0; + dev->dma_mode = 0xff; /* If the controller has a pio mode setup function * then use it to set the chipset to rights. Don't @@ -3487,7 +3488,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg u64 now = get_jiffies_64(); int *trials = void_arg; - if (ent->timestamp < now - min(now, interval)) + if ((ent->eflags & ATA_EFLAG_OLD_ER) || + (ent->timestamp < now - min(now, interval))) return -1; (*trials)++; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 927f968e99d..3b42a5d6efd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -308,7 +308,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); - if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY)) + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) return ap->ops->sw_activity_show(atadev, buf); return -EINVAL; } @@ -323,7 +324,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, enum sw_activity val; int rc; - if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { val = simple_strtoul(buf, NULL, 0); switch (val) { case OFF: case BLINK_ON: case BLINK_OFF: diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 6bd9425ba5a..d750962916b 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -396,8 +396,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); active = clamp_val(t.active, 2, 15); - recover = clamp_val(t.recover, 2, 16); - recover &= 0x15; + recover = clamp_val(t.recover, 2, 16) & 0x0F; inb(0x3E6); inb(0x3E6); diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index a004b1e0ea6..ca4646aa0d6 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -147,6 +147,10 @@ struct pdc_port_priv { dma_addr_t pkt_dma; }; +struct pdc_host_priv { + spinlock_t hard_reset_lock; +}; + static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int pdc_ata_init_one(struct pci_dev 
*pdev, const struct pci_device_id *ent); @@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap) void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR]; void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1; unsigned int ata_no = pdc_ata_port_to_ata_no(ap); + struct pdc_host_priv *hpriv = ap->host->private_data; u8 tmp; - spin_lock(&ap->host->lock); + spin_lock(&hpriv->hard_reset_lock); tmp = readb(pcictl_b1_mmio); tmp &= ~(0x10 << ata_no); @@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap) writeb(tmp, pcictl_b1_mmio); readb(pcictl_b1_mmio); /* flush */ - spin_unlock(&ap->host->lock); + spin_unlock(&hpriv->hard_reset_lock); } static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class, @@ -1183,6 +1188,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev, const struct ata_port_info *pi = &pdc_port_info[ent->driver_data]; const struct ata_port_info *ppi[PDC_MAX_PORTS]; struct ata_host *host; + struct pdc_host_priv *hpriv; void __iomem *host_mmio; int n_ports, i, rc; int is_sataii_tx4; @@ -1220,6 +1226,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev, dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n"); return -ENOMEM; } + hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + spin_lock_init(&hpriv->hard_reset_lock); + host->private_data = hpriv; host->iomap = pcim_iomap_table(pdev); is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags); diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 35eabcf3456..84980acf324 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link, return 0; } +static int k2_sata_softreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return ata_sff_softreset(link, class, deadline); +} + +static int k2_sata_hardreset(struct ata_link *link, + unsigned int *class, unsigned long deadline) +{ + u8 dmactl; + void __iomem *mmio = link->ap->ioaddr.bmdma_addr; + + dmactl = readb(mmio + ATA_DMA_CMD); + + /* Clear the start bit */ + if (dmactl & ATA_DMA_START) { + dmactl &= ~ATA_DMA_START; + writeb(dmactl, mmio + ATA_DMA_CMD); + } + + return sata_sff_hardreset(link, class, deadline); +} static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { @@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = { static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, + .softreset = k2_sata_softreset, + .hardreset = k2_sata_hardreset, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index 077735e0e04..b2778e75e31 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -636,82 +636,82 @@ struct rx_buf_desc { #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE -typedef volatile u_int freg_t; +typedef volatile u_int ffreg_t; typedef u_int rreg_t; typedef struct _ffredn_t { - freg_t idlehead_high; /* Idle cell header (high) */ - freg_t idlehead_low; /* Idle cell header (low) */ - freg_t maxrate; /* Maximum rate */ - freg_t stparms; /* Traffic Management Parameters */ - freg_t abrubr_abr; /* ABRUBR 
Priority Byte 1, TCR Byte 0 */ - freg_t rm_type; /* */ - u_int filler5[0x17 - 0x06]; - freg_t cmd_reg; /* Command register */ - u_int filler18[0x20 - 0x18]; - freg_t cbr_base; /* CBR Pointer Base */ - freg_t vbr_base; /* VBR Pointer Base */ - freg_t abr_base; /* ABR Pointer Base */ - freg_t ubr_base; /* UBR Pointer Base */ - u_int filler24; - freg_t vbrwq_base; /* VBR Wait Queue Base */ - freg_t abrwq_base; /* ABR Wait Queue Base */ - freg_t ubrwq_base; /* UBR Wait Queue Base */ - freg_t vct_base; /* Main VC Table Base */ - freg_t vcte_base; /* Extended Main VC Table Base */ - u_int filler2a[0x2C - 0x2A]; - freg_t cbr_tab_beg; /* CBR Table Begin */ - freg_t cbr_tab_end; /* CBR Table End */ - freg_t cbr_pointer; /* CBR Pointer */ - u_int filler2f[0x30 - 0x2F]; - freg_t prq_st_adr; /* Packet Ready Queue Start Address */ - freg_t prq_ed_adr; /* Packet Ready Queue End Address */ - freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ - freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ - freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ - freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ - freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ - freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ - u_int filler38[0x40 - 0x38]; - freg_t queue_base; /* Base address for PRQ and TCQ */ - freg_t desc_base; /* Base address of descriptor table */ - u_int filler42[0x45 - 0x42]; - freg_t mode_reg_0; /* Mode register 0 */ - freg_t mode_reg_1; /* Mode register 1 */ - freg_t intr_status_reg;/* Interrupt Status register */ - freg_t mask_reg; /* Mask Register */ - freg_t cell_ctr_high1; /* Total cell transfer count (high) */ - freg_t cell_ctr_lo1; /* Total cell transfer count (low) */ - freg_t state_reg; /* Status register */ - u_int filler4c[0x58 - 0x4c]; - freg_t curr_desc_num; /* Contains the current descriptor num */ - freg_t next_desc; /* Next descriptor */ - freg_t next_vc; /* Next VC */ - u_int filler5b[0x5d - 0x5b]; - freg_t present_slot_cnt;/* Present slot count */ - u_int filler5e[0x6a - 0x5e]; - freg_t new_desc_num; /* New descriptor number */ - freg_t new_vc; /* New VC */ - freg_t sched_tbl_ptr; /* Schedule table pointer */ - freg_t vbrwq_wptr; /* VBR wait queue write pointer */ - freg_t vbrwq_rptr; /* VBR wait queue read pointer */ - freg_t abrwq_wptr; /* ABR wait queue write pointer */ - freg_t abrwq_rptr; /* ABR wait queue read pointer */ - freg_t ubrwq_wptr; /* UBR wait queue write pointer */ - freg_t ubrwq_rptr; /* UBR wait queue read pointer */ - freg_t cbr_vc; /* CBR VC */ - freg_t vbr_sb_vc; /* VBR SB VC */ - freg_t abr_sb_vc; /* ABR SB VC */ - freg_t ubr_sb_vc; /* UBR SB VC */ - freg_t vbr_next_link; /* VBR next link */ - freg_t abr_next_link; /* ABR next link */ - freg_t ubr_next_link; /* UBR next link */ - u_int filler7a[0x7c-0x7a]; - freg_t out_rate_head; /* Out of rate head */ - u_int filler7d[0xca-0x7d]; /* pad out to full address space */ - freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ - freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ - u_int fillercc[0x100-0xcc]; /* pad out to full address space */ + ffreg_t idlehead_high; /* Idle cell header (high) */ + ffreg_t idlehead_low; /* Idle cell header (low) */ + ffreg_t maxrate; /* Maximum rate */ + ffreg_t stparms; /* Traffic Management Parameters */ + ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */ + ffreg_t rm_type; /* */ + u_int filler5[0x17 - 0x06]; + ffreg_t cmd_reg; /* Command register */ + u_int filler18[0x20 - 0x18]; + ffreg_t 
cbr_base; /* CBR Pointer Base */ + ffreg_t vbr_base; /* VBR Pointer Base */ + ffreg_t abr_base; /* ABR Pointer Base */ + ffreg_t ubr_base; /* UBR Pointer Base */ + u_int filler24; + ffreg_t vbrwq_base; /* VBR Wait Queue Base */ + ffreg_t abrwq_base; /* ABR Wait Queue Base */ + ffreg_t ubrwq_base; /* UBR Wait Queue Base */ + ffreg_t vct_base; /* Main VC Table Base */ + ffreg_t vcte_base; /* Extended Main VC Table Base */ + u_int filler2a[0x2C - 0x2A]; + ffreg_t cbr_tab_beg; /* CBR Table Begin */ + ffreg_t cbr_tab_end; /* CBR Table End */ + ffreg_t cbr_pointer; /* CBR Pointer */ + u_int filler2f[0x30 - 0x2F]; + ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */ + ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */ + ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */ + ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */ + ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/ + ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */ + ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */ + ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/ + u_int filler38[0x40 - 0x38]; + ffreg_t queue_base; /* Base address for PRQ and TCQ */ + ffreg_t desc_base; /* Base address of descriptor table */ + u_int filler42[0x45 - 0x42]; + ffreg_t mode_reg_0; /* Mode register 0 */ + ffreg_t mode_reg_1; /* Mode register 1 */ + ffreg_t intr_status_reg;/* Interrupt Status register */ + ffreg_t mask_reg; /* Mask Register */ + ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */ + ffreg_t state_reg; /* Status register */ + u_int filler4c[0x58 - 0x4c]; + ffreg_t curr_desc_num; /* Contains the current descriptor num */ + ffreg_t next_desc; /* Next descriptor */ + ffreg_t next_vc; /* Next VC */ + u_int filler5b[0x5d - 0x5b]; + ffreg_t present_slot_cnt;/* Present slot count */ + u_int filler5e[0x6a - 0x5e]; + ffreg_t new_desc_num; /* New descriptor number */ + ffreg_t new_vc; /* New VC */ + ffreg_t sched_tbl_ptr; /* Schedule table pointer */ + ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */ + ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */ + ffreg_t abrwq_wptr; /* ABR wait queue write pointer */ + ffreg_t abrwq_rptr; /* ABR wait queue read pointer */ + ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */ + ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */ + ffreg_t cbr_vc; /* CBR VC */ + ffreg_t vbr_sb_vc; /* VBR SB VC */ + ffreg_t abr_sb_vc; /* ABR SB VC */ + ffreg_t ubr_sb_vc; /* UBR SB VC */ + ffreg_t vbr_next_link; /* VBR next link */ + ffreg_t abr_next_link; /* ABR next link */ + ffreg_t ubr_next_link; /* UBR next link */ + u_int filler7a[0x7c-0x7a]; + ffreg_t out_rate_head; /* Out of rate head */ + u_int filler7d[0xca-0x7d]; /* pad out to full address space */ + ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */ + ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */ + u_int fillercc[0x100-0xcc]; /* pad out to full address space */ } ffredn_t; typedef struct _rfredn_t { diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 5d1d0764513..adfce9f1eb9 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card) for (port = 0; tx_pending; tx_pending >>= 1, port++) { if (tx_pending & 1) { struct sk_buff *oldskb = card->tx_skb[port]; - if (oldskb) + if (oldskb) { pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, oldskb->len, PCI_DMA_TODEVICE); - + 
card->tx_skb[port] = NULL; + } spin_lock(&card->tx_queue_lock); skb = skb_dequeue(&card->tx_queue[port]); if (!skb) @@ -984,6 +985,7 @@ static uint32_t fpga_tx(struct solos_card *card) } else if (skb && card->using_dma) { SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, skb->len, PCI_DMA_TODEVICE); + card->tx_skb[port] = skb; iowrite32(SKB_CB(skb)->dma_addr, card->config_regs + TX_DMA_ADDR(port)); } @@ -1152,7 +1154,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) db_fpga_upgrade = db_firmware_upgrade = 0; } - if (card->fpga_version >= DMA_SUPPORTED){ + if (card->fpga_version >= DMA_SUPPORTED) { + pci_set_master(dev); card->using_dma = 1; } else { card->using_dma = 0; diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 12f5c475fe2..edfd95d3f69 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -182,4 +182,30 @@ config GENLOCK_MISCDEVICE Create a miscdevice for the purposes of allowing userspace to create and interact with locks created using genlock. +config SYNC + bool "Synchronization framework" + default n + select ANON_INODES + help + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config SW_SYNC + bool "Software synchronization objects" + default n + depends on SYNC + help + A sync object driver that uses a 32bit counter to coordinate + synchronization. Useful when there is no hardware primitive backing + the synchronization. + +config SW_SYNC_USER + bool "Userspace API for SW_SYNC" + default n + depends on SW_SYNC + help + Provides a user space API to the sw sync object. + *WARNING* improper use of this can result in deadlocking kernel + drivers from userspace.
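For illustration, here is a minimal userspace sketch of the SW_SYNC_USER interface described in the help text above; it drives the sw_sync misc device added in drivers/base/sw_sync.c later in this patch. The ioctl encodings and struct sw_sync_create_fence_data live in uapi headers that are not part of this hunk, so the definitions below are assumptions for illustration only, and error handling is omitted.

/* Hypothetical sketch: drive a sw_sync timeline from userspace.
 * The struct layout and ioctl numbers below are assumed, not taken
 * from this patch; they only need to match the handlers in
 * sw_sync_ioctl() and sync_fence_ioctl() added further down.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

struct sw_sync_create_fence_data {
	unsigned int value;   /* timeline value at which the fence signals */
	char name[32];        /* name copied into the kernel fence */
	int fence;            /* out: fd of the new sync fence */
};

#define SW_SYNC_IOC_MAGIC        'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC          _IOW(SW_SYNC_IOC_MAGIC, 1, unsigned int)
#define SYNC_IOC_WAIT            _IOW('>', 0, int)

int main(void)
{
	int tl = open("/dev/sw_sync", O_RDWR);      /* each open() is one timeline */
	struct sw_sync_create_fence_data data = { .value = 1 };
	unsigned int step = 1;
	int timeout_ms = 1000;

	strcpy(data.name, "example_fence");
	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);   /* fence fires at value 1 */
	ioctl(tl, SW_SYNC_IOC_INC, &step);            /* timeline 0 -> 1, signals it */
	ioctl(data.fence, SYNC_IOC_WAIT, &timeout_ms);/* already signaled, returns 0 */

	close(data.fence);
	close(tl);                                    /* destroys the timeline */
	return 0;
}

Each of the three ioctls maps onto sw_sync_ioctl_create_fence(), sw_sync_timeline_inc() and sync_fence_wait() in the code added below.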
endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile index d3e2ec13240..b930083bb16 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -20,5 +20,8 @@ obj-$(CONFIG_MODULES) += module.o endif obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o +obj-$(CONFIG_SYNC) += sync.o +obj-$(CONFIG_SW_SYNC) += sw_sync.o + ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 000e7b2006f..8b8e8c06f29 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, struct device *dev; int error = 0; - if (!bus) + if (!bus || !bus->p) return -EINVAL; klist_iter_init_node(&bus->p->klist_devices, &i, @@ -323,7 +323,7 @@ struct device *bus_find_device(struct bus_type *bus, struct klist_iter i; struct device *dev; - if (!bus) + if (!bus || !bus->p) return NULL; klist_iter_init_node(&bus->p->klist_devices, &i, diff --git a/drivers/base/core.c b/drivers/base/core.c index 78445f40c43..d13851c5c68 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1743,8 +1743,10 @@ void device_shutdown(void) */ list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); - /* Disable all device's runtime power management */ - pm_runtime_disable(dev); + + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 06ed6b4e7df..3719c94be19 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev, int loading = simple_strtol(buf, NULL, 10); int i; + mutex_lock(&fw_lock); + + if (!fw_priv->fw) + goto out; + switch (loading) { case 1: - mutex_lock(&fw_lock); - if (!fw_priv->fw) { - mutex_unlock(&fw_lock); - break; - } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ @@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev, fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); - mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { @@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev, fw_load_abort(fw_priv); break; } - +out: + mutex_unlock(&fw_lock); return count; } diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c index 9f7a6008f22..1fb606f116d 100644 --- a/drivers/base/genlock.c +++ b/drivers/base/genlock.c @@ -34,7 +34,15 @@ #define GENLOCK_LOG_ERR(fmt, args...) 
\ pr_err("genlock: %s: " fmt, __func__, ##args) +/* The genlock magic stored in the kernel private data is used to protect + * against the possibility of user space passing a valid fd to a + * non-genlock file for genlock_attach_lock() + */ +#define GENLOCK_MAGIC_OK 0xD2EAD10C +#define GENLOCK_MAGIC_BAD 0xD2EADBAD + struct genlock { + unsigned int magic; /* Magic for attach verification */ struct list_head active; /* List of handles holding lock */ spinlock_t lock; /* Spinlock to protect the lock internals */ wait_queue_head_t queue; /* Holding pen for processes pending lock */ @@ -56,7 +64,7 @@ struct genlock_handle { * released while another process tries to attach it */ -static DEFINE_SPINLOCK(genlock_file_lock); +static DEFINE_SPINLOCK(genlock_ref_lock); static void genlock_destroy(struct kref *kref) { @@ -68,10 +76,9 @@ static void genlock_destroy(struct kref *kref) * still active after the lock gets released */ - spin_lock(&genlock_file_lock); if (lock->file) lock->file->private_data = NULL; - spin_unlock(&genlock_file_lock); + lock->magic = GENLOCK_MAGIC_BAD; kfree(lock); } @@ -125,6 +132,7 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle) init_waitqueue_head(&lock->queue); spin_lock_init(&lock->lock); + lock->magic = GENLOCK_MAGIC_OK; lock->state = _UNLOCKED; /* @@ -193,21 +201,30 @@ struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd) * released and then attached */ - spin_lock(&genlock_file_lock); + spin_lock(&genlock_ref_lock); lock = file->private_data; - spin_unlock(&genlock_file_lock); fput(file); if (lock == NULL) { GENLOCK_LOG_ERR("File descriptor is invalid\n"); - return ERR_PTR(-EINVAL); + goto fail_invalid; + } + + if (lock->magic != GENLOCK_MAGIC_OK) { + GENLOCK_LOG_ERR("Magic is invalid - 0x%X\n", lock->magic); + goto fail_invalid; } handle->lock = lock; kref_get(&lock->refcount); + spin_unlock(&genlock_ref_lock); return lock; + +fail_invalid: + spin_unlock(&genlock_ref_lock); + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL(genlock_attach_lock); @@ -278,7 +295,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, { unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); spin_lock_irqsave(&lock->lock, irqflags); @@ -297,12 +314,15 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, if (handle_has_lock(lock, handle)) { /* - * If the handle already holds the lock and the type matches, - * then just increment the active pointer. This allows the - * handle to do recursive locks + * If the handle already holds the lock and the lock type is + * a read lock then just increment the active pointer. This + * allows the handle to do recursive read locks. Recursive + * write locks are not allowed in order to support + * synchronization within a process using a single gralloc + * handle. */ - if (lock->state == op) { + if (lock->state == _RDLOCK && op == _RDLOCK) { handle->active++; goto done; } @@ -311,32 +331,45 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, * If the handle holds a write lock then the owner can switch * to a read lock if they want. Do the transition atomically * then wake up any pending waiters in case they want a read - * lock too. + * lock too. In order to support synchronization within a + * process the caller must explicitly request to convert the + * lock type with the GENLOCK_WRITE_TO_READ flag.
*/ - if (op == _RDLOCK && handle->active == 1) { - lock->state = _RDLOCK; - wake_up(&lock->queue); - goto done; + if (flags & GENLOCK_WRITE_TO_READ) { + if (lock->state == _WRLOCK && op == _RDLOCK) { + lock->state = _RDLOCK; + wake_up(&lock->queue); + goto done; + } else { + GENLOCK_LOG_ERR("Invalid state to convert " + "write to read\n"); + ret = -EINVAL; + goto done; + } } + } else { + /* - * Otherwise the user tried to turn a read into a write, and we - * don't allow that. + * Check to ensure the caller has not attempted to convert a + * write to a read without holding the lock. */ - GENLOCK_LOG_ERR("Trying to upgrade a read lock to a write" - "lock\n"); - ret = -EINVAL; - goto done; - } - /* - * If we request a read and the lock is held by a read, then go - * ahead and share the lock - */ + if (flags & GENLOCK_WRITE_TO_READ) { + GENLOCK_LOG_ERR("Handle must have lock to convert " + "write to read\n"); + ret = -EINVAL; + goto done; + } - if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) - goto dolock; + /* + * If we request a read and the lock is held by a read, then go + * ahead and share the lock + */ + + if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) + goto dolock; + } /* Treat timeout 0 just like a NOBLOCK flag and return if the lock cannot be acquired without blocking */ @@ -346,15 +379,26 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - /* Wait while the lock remains in an incompatible state */ + /* + * Wait while the lock remains in an incompatible state + * state op wait + * ------------------- + * unlocked n/a no + * read read no + * read write yes + * write n/a yes + */ - while (lock->state != _UNLOCKED) { - int elapsed; + while ((lock->state == _RDLOCK && op == _WRLOCK) || + lock->state == _WRLOCK) { + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); elapsed = wait_event_interruptible_timeout(lock->queue, - lock->state == _UNLOCKED, ticks); + lock->state == _UNLOCKED || + (lock->state == _RDLOCK && op == _RDLOCK), + ticks); spin_lock_irqsave(&lock->lock, irqflags); @@ -363,7 +407,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } dolock: @@ -371,7 +415,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, list_add_tail(&handle->entry, &lock->active); lock->state = op; - handle->active = 1; + handle->active++; done: spin_unlock_irqrestore(&lock->lock, irqflags); @@ -380,7 +424,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, } /** - * genlock_lock - Acquire or release a lock + * genlock_lock - Acquire or release a lock (deprecated) * @handle - pointer to the genlock handle that is requesting the lock * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) * @flags - flags to control the operation @@ -391,10 +435,73 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, int genlock_lock(struct genlock_handle *handle, int op, int flags, uint32_t timeout) + { + struct genlock *lock; + unsigned long irqflags; + + int ret = 0; + + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + switch (op) { + case GENLOCK_UNLOCK: + ret = _genlock_unlock(lock, handle); + break; + case GENLOCK_RDLOCK: + spin_lock_irqsave(&lock->lock, irqflags); + if
(handle_has_lock(lock, handle)) { + /* request the WRITE_TO_READ flag for compatibility */ + flags |= GENLOCK_WRITE_TO_READ; + } + spin_unlock_irqrestore(&lock->lock, irqflags); + /* fall through to take lock */ + case GENLOCK_WRLOCK: + ret = _genlock_lock(lock, handle, op, flags, timeout); + break; + default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(genlock_lock); + +/** + * genlock_dreadlock - Acquire or release a lock + * @handle - pointer to the genlock handle that is requesting the lock + * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) + * @flags - flags to control the operation + * @timeout - optional timeout to wait for the lock to come free + * + * Returns: 0 on success or error code on failure + */ + +int genlock_dreadlock(struct genlock_handle *handle, int op, int flags, + uint32_t timeout) { - struct genlock *lock = handle->lock; + struct genlock *lock; + int ret = 0; + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + if (lock == NULL) { GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); return -EINVAL; @@ -416,7 +523,7 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags, return ret; } -EXPORT_SYMBOL(genlock_lock); +EXPORT_SYMBOL(genlock_dreadlock); /** * genlock_wait - Wait for the lock to be released @@ -429,7 +536,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) struct genlock *lock = handle->lock; unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); if (lock == NULL) { GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); @@ -449,7 +556,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) } while (lock->state != _UNLOCKED) { - int elapsed; + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); @@ -463,7 +570,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) break; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } done: @@ -493,7 +600,9 @@ void genlock_release_lock(struct genlock_handle *handle) } spin_unlock_irqrestore(&handle->lock->lock, flags); + spin_lock(&genlock_ref_lock); kref_put(&handle->lock->refcount, genlock_destroy); + spin_unlock(&genlock_ref_lock); handle->lock = NULL; handle->active = 0; } @@ -635,6 +744,14 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, return genlock_lock(handle, param.op, param.flags, param.timeout); } + case GENLOCK_IOC_DREADLOCK: { + if (copy_from_user(&param, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_dreadlock(handle, param.op, param.flags, + param.timeout); + } case GENLOCK_IOC_WAIT: { + if (copy_from_user(&param, (void __user *) arg, sizeof(param))) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 45d7c8fc73b..5fb6aaed2ad 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -223,6 +223,42 @@ int memory_isolate_notify(unsigned long val, void *v) return atomic_notifier_call_chain(&memory_isolate_chain, val, v); } +/* + * The probe routines leave the pages reserved, just as the bootmem code does. + * Make sure they're still that way. + */ +static bool pages_correctly_reserved(unsigned long start_pfn, + unsigned long nr_pages) +{ + int i, j; + struct page *page; + unsigned long pfn = start_pfn; + + /* + * memmap between sections is not contiguous except with + * SPARSEMEM_VMEMMAP.
We lookup the page once per section + * and assume memmap is contiguous within each section + */ + for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) { + if (WARN_ON_ONCE(!pfn_valid(pfn))) + return false; + page = pfn_to_page(pfn); + + for (j = 0; j < PAGES_PER_SECTION; j++) { + if (PageReserved(page + j)) + continue; + + printk(KERN_WARNING "section number %ld page number %d " + "not reserved, was it already online?\n", + pfn_to_section_nr(pfn), j); + + return false; + } + } + + return true; +} + /* * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is * OK to have direct references to sparsemem variables in here. @@ -230,7 +266,6 @@ int memory_isolate_notify(unsigned long val, void *v) static int memory_block_action(unsigned long phys_index, unsigned long action) { - int i; unsigned long start_pfn, start_paddr; unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; struct page *first_page; @@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action) first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT); - /* - * The probe routines leave the pages reserved, just - * as the bootmem code does. Make sure they're still - * that way. - */ - if (action == MEM_ONLINE) { - for (i = 0; i < nr_pages; i++) { - if (PageReserved(first_page+i)) - continue; - - printk(KERN_WARNING "section number %ld page number %d " - "not reserved, was it already online?\n", - phys_index, i); - return -EBUSY; - } - } - switch (action) { case MEM_ONLINE: start_pfn = page_to_pfn(first_page); + + if (!pages_correctly_reserved(start_pfn, nr_pages)) + return -EBUSY; + ret = online_pages(start_pfn, nr_pages); break; case MEM_OFFLINE: diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 102339213c5..13f7db6bcde 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -360,7 +360,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto repeat; } - dev->power.deferred_resume = false; if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ @@ -420,6 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { + dev->power.deferred_resume = false; rpm_resume(dev, 0); retval = -EAGAIN; goto out; @@ -533,6 +533,7 @@ static int rpm_resume(struct device *dev, int rpmflags) || dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); + retval = 1; goto no_callback; /* Assume success. */ } spin_unlock(&dev->parent->power.lock); @@ -610,7 +611,7 @@ static int rpm_resume(struct device *dev, int rpmflags) } wake_up_all(&dev->power.wait_queue); - if (!retval) + if (retval >= 0) rpm_idle(dev, RPM_ASYNC); out: diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c new file mode 100644 index 00000000000..21ddf4ffd58 --- /dev/null +++ b/drivers/base/sw_sync.c @@ -0,0 +1,256 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static int sw_sync_cmp(u32 a, u32 b) +{ + if (a == b) + return 0; + + return ((s32)a - (s32)b) < 0 ? -1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ + struct sw_sync_pt *pt; + + pt = (struct sw_sync_pt *) + sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + + pt->value = value; + + return (struct sync_pt *)pt; +} + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ + struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; + struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + + return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static void sw_sync_print_obj(struct seq_file *s, + struct sync_timeline *sync_timeline) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *)sync_timeline; + + seq_printf(s, "%d", obj->value); +} + +static void sw_sync_print_pt(struct seq_file *s, struct sync_pt *sync_pt) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + struct sw_sync_timeline *obj = + (struct sw_sync_timeline *)sync_pt->parent; + + seq_printf(s, "%d / %d", pt->value, obj->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, + void *data, int size) +{ + struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + + if (size < sizeof(pt->value)) + return -ENOMEM; + + memcpy(data, &pt->value, sizeof(pt->value)); + + return sizeof(pt->value); +} + +struct sync_timeline_ops sw_sync_timeline_ops = { + .driver_name = "sw_sync", + .dup = sw_sync_pt_dup, + .has_signaled = sw_sync_pt_has_signaled, + .compare = sw_sync_pt_compare, + .print_obj = sw_sync_print_obj, + .print_pt = sw_sync_print_pt, + .fill_driver_data = sw_sync_fill_driver_data, +}; + + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + struct sw_sync_timeline *obj = (struct sw_sync_timeline *) + sync_timeline_create(&sw_sync_timeline_ops, + sizeof(struct sw_sync_timeline), + name); + + return obj; +} + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ + obj->value += inc; + + sync_timeline_signal(&obj->obj); +} + + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. 
+ */ + +/* opening sw_sync create a new sync obj */ +int sw_sync_open(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj; + char task_comm[TASK_COMM_LEN]; + + get_task_comm(task_comm, current); + + obj = sw_sync_timeline_create(task_comm); + if (obj == NULL) + return -ENOMEM; + + file->private_data = obj; + + return 0; +} + +int sw_sync_release(struct inode *inode, struct file *file) +{ + struct sw_sync_timeline *obj = file->private_data; + sync_timeline_destroy(&obj->obj); + return 0; +} + +long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_pt *pt; + struct sync_fence *fence; + struct sw_sync_create_fence_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + pt = sw_sync_pt_create(obj, data.value); + if (pt == NULL) { + err = -ENOMEM; + goto err; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence = sync_fence_create(data.name, pt); + if (fence == NULL) { + sync_pt_free(pt); + err = -ENOMEM; + goto err; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + sync_fence_put(fence); + err = -EFAULT; + goto err; + } + + sync_fence_install(fence, fd); + + return 0; + +err: + put_unused_fd(fd); + return err; +} + +long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ + u32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + sw_sync_timeline_inc(obj, value); + + return 0; +} + +long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct sw_sync_timeline *obj = file->private_data; + + switch (cmd) { + case SW_SYNC_IOC_CREATE_FENCE: + return sw_sync_ioctl_create_fence(obj, arg); + + case SW_SYNC_IOC_INC: + return sw_sync_ioctl_inc(obj, arg); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sw_sync_fops = { + .owner = THIS_MODULE, + .open = sw_sync_open, + .release = sw_sync_release, + .unlocked_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw_sync", + .fops = &sw_sync_fops, +}; + +int __init sw_sync_device_init(void) +{ + return misc_register(&sw_sync_dev); +} + +void __exit sw_sync_device_remove(void) +{ + misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/base/sync.c b/drivers/base/sync.c new file mode 100755 index 00000000000..809d02b21e0 --- /dev/null +++ b/drivers/base/sync.c @@ -0,0 +1,1009 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define CREATE_TRACE_POINTS +#include + +static void sync_fence_signal_pt(struct sync_pt *pt); +static int _sync_pt_has_signaled(struct sync_pt *pt); +static void sync_fence_free(struct kref *kref); +static void sync_dump(void); + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); + +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, + int size, const char *name) +{ + struct sync_timeline *obj; + unsigned long flags; + + if (size < sizeof(struct sync_timeline)) + return NULL; + + obj = kzalloc(size, GFP_KERNEL); + if (obj == NULL) + return NULL; + + kref_init(&obj->kref); + obj->ops = ops; + strlcpy(obj->name, name, sizeof(obj->name)); + + INIT_LIST_HEAD(&obj->child_list_head); + spin_lock_init(&obj->child_list_lock); + + INIT_LIST_HEAD(&obj->active_list_head); + spin_lock_init(&obj->active_list_lock); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + return obj; +} +EXPORT_SYMBOL(sync_timeline_create); + +static void sync_timeline_free(struct kref *kref) +{ + struct sync_timeline *obj = + container_of(kref, struct sync_timeline, kref); + unsigned long flags; + + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_del(&obj->sync_timeline_list); + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + kfree(obj); +} + +void sync_timeline_destroy(struct sync_timeline *obj) +{ + obj->destroyed = true; + + /* + * If this is not the last reference, signal any children + * that their parent is going away. 
+ */ + + if (!kref_put(&obj->kref, sync_timeline_free)) + sync_timeline_signal(obj); +} +EXPORT_SYMBOL(sync_timeline_destroy); + +static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) +{ + unsigned long flags; + + pt->parent = obj; + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_add_tail(&pt->child_list, &obj->child_list_head); + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_timeline_remove_pt(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + + spin_lock_irqsave(&obj->active_list_lock, flags); + if (!list_empty(&pt->active_list)) + list_del_init(&pt->active_list); + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + spin_lock_irqsave(&obj->child_list_lock, flags); + if (!list_empty(&pt->child_list)) { + list_del_init(&pt->child_list); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +void sync_timeline_signal(struct sync_timeline *obj) +{ + unsigned long flags; + LIST_HEAD(signaled_pts); + struct list_head *pos, *n; + + trace_sync_timeline(obj); + + spin_lock_irqsave(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &obj->active_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, active_list); + + if (_sync_pt_has_signaled(pt)) { + list_del_init(pos); + list_add(&pt->signaled_list, &signaled_pts); + kref_get(&pt->fence->kref); + } + } + + spin_unlock_irqrestore(&obj->active_list_lock, flags); + + list_for_each_safe(pos, n, &signaled_pts) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, signaled_list); + + list_del_init(pos); + sync_fence_signal_pt(pt); + kref_put(&pt->fence->kref, sync_fence_free); + } +} +EXPORT_SYMBOL(sync_timeline_signal); + +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +{ + struct sync_pt *pt; + + if (size < sizeof(struct sync_pt)) + return NULL; + + pt = kzalloc(size, GFP_KERNEL); + if (pt == NULL) + return NULL; + + INIT_LIST_HEAD(&pt->active_list); + kref_get(&parent->kref); + sync_timeline_add_pt(parent, pt); + + return pt; +} +EXPORT_SYMBOL(sync_pt_create); + +void sync_pt_free(struct sync_pt *pt) +{ + if (pt->parent->ops->free_pt) + pt->parent->ops->free_pt(pt); + + sync_timeline_remove_pt(pt); + + kref_put(&pt->parent->kref, sync_timeline_free); + + kfree(pt); +} +EXPORT_SYMBOL(sync_pt_free); + +/* call with pt->parent->active_list_lock held */ +static int _sync_pt_has_signaled(struct sync_pt *pt) +{ + int old_status = pt->status; + + if (!pt->status) + pt->status = pt->parent->ops->has_signaled(pt); + + if (!pt->status && pt->parent->destroyed) + pt->status = -ENOENT; + + if (pt->status != old_status) + pt->timestamp = ktime_get(); + + return pt->status; +} + +static struct sync_pt *sync_pt_dup(struct sync_pt *pt) +{ + return pt->parent->ops->dup(pt); +} + +/* Adds a sync pt to the active queue. 
Called when added to a fence */ +static void sync_pt_activate(struct sync_pt *pt) +{ + struct sync_timeline *obj = pt->parent; + unsigned long flags; + int err; + + spin_lock_irqsave(&obj->active_list_lock, flags); + + err = _sync_pt_has_signaled(pt); + if (err != 0) + goto out; + + list_add_tail(&pt->active_list, &obj->active_list_head); + +out: + spin_unlock_irqrestore(&obj->active_list_lock, flags); +} + +static int sync_fence_release(struct inode *inode, struct file *file); +static unsigned int sync_fence_poll(struct file *file, poll_table *wait); +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + + +static const struct file_operations sync_fence_fops = { + .release = sync_fence_release, + .poll = sync_fence_poll, + .unlocked_ioctl = sync_fence_ioctl, +}; + +static struct sync_fence *sync_fence_alloc(const char *name) +{ + struct sync_fence *fence; + unsigned long flags; + + fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, + fence, 0); + if (fence->file == NULL) + goto err; + + kref_init(&fence->kref); + strlcpy(fence->name, name, sizeof(fence->name)); + + INIT_LIST_HEAD(&fence->pt_list_head); + INIT_LIST_HEAD(&fence->waiter_list_head); + spin_lock_init(&fence->waiter_list_lock); + + init_waitqueue_head(&fence->wq); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + return fence; + +err: + kfree(fence); + return NULL; +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ + struct sync_fence *fence; + + if (pt->fence) + return NULL; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + pt->fence = fence; + list_add(&pt->pt_list, &fence->pt_list_head); + sync_pt_activate(pt); + + /* + * signal the fence in case pt was activated before + * sync_pt_activate(pt) was called + */ + sync_fence_signal_pt(pt); + + return fence; +} +EXPORT_SYMBOL(sync_fence_create); + +static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *pos; + + list_for_each(pos, &src->pt_list_head) { + struct sync_pt *orig_pt = + container_of(pos, struct sync_pt, pt_list); + struct sync_pt *new_pt = sync_pt_dup(orig_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + sync_pt_activate(new_pt); + } + + return 0; +} + +static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) +{ + struct list_head *src_pos, *dst_pos, *n; + + list_for_each(src_pos, &src->pt_list_head) { + struct sync_pt *src_pt = + container_of(src_pos, struct sync_pt, pt_list); + bool collapsed = false; + + list_for_each_safe(dst_pos, n, &dst->pt_list_head) { + struct sync_pt *dst_pt = + container_of(dst_pos, struct sync_pt, pt_list); + /* collapse two sync_pts on the same timeline + * to a single sync_pt that will signal at + * the later of the two + */ + if (dst_pt->parent == src_pt->parent) { + if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) { + struct sync_pt *new_pt = + sync_pt_dup(src_pt); + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_replace(&dst_pt->pt_list, + &new_pt->pt_list); + sync_pt_activate(new_pt); + sync_pt_free(dst_pt); + } + collapsed = true; + break; + } + } + + if (!collapsed) { 
+ struct sync_pt *new_pt = sync_pt_dup(src_pt); + + if (new_pt == NULL) + return -ENOMEM; + + new_pt->fence = dst; + list_add(&new_pt->pt_list, &dst->pt_list_head); + sync_pt_activate(new_pt); + } + } + + return 0; +} + +static void sync_fence_detach_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_timeline_remove_pt(pt); + } +} + +static void sync_fence_free_pts(struct sync_fence *fence) +{ + struct list_head *pos, *n; + + list_for_each_safe(pos, n, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + sync_pt_free(pt); + } +} + +struct sync_fence *sync_fence_fdget(int fd) +{ + struct file *file = fget(fd); + + if (file == NULL) + return NULL; + + if (file->f_op != &sync_fence_fops) + goto err; + + return file->private_data; + +err: + fput(file); + return NULL; +} +EXPORT_SYMBOL(sync_fence_fdget); + +void sync_fence_put(struct sync_fence *fence) +{ + fput(fence->file); +} +EXPORT_SYMBOL(sync_fence_put); + +void sync_fence_install(struct sync_fence *fence, int fd) +{ + fd_install(fd, fence->file); +} +EXPORT_SYMBOL(sync_fence_install); + +static int sync_fence_get_status(struct sync_fence *fence) +{ + struct list_head *pos; + int status = 1; + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + int pt_status = pt->status; + + if (pt_status < 0) { + status = pt_status; + break; + } else if (status == 1) { + status = pt_status; + } + } + + return status; +} + +struct sync_fence *sync_fence_merge(const char *name, + struct sync_fence *a, struct sync_fence *b) +{ + struct sync_fence *fence; + int err; + + fence = sync_fence_alloc(name); + if (fence == NULL) + return NULL; + + err = sync_fence_copy_pts(fence, a); + if (err < 0) + goto err; + + err = sync_fence_merge_pts(fence, b); + if (err < 0) + goto err; + + /* + * signal the fence in case one of it's pts were activated before + * they were activated + */ + sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, + struct sync_pt, + pt_list)); + + return fence; +err: + sync_fence_free_pts(fence); + kfree(fence); + return NULL; +} +EXPORT_SYMBOL(sync_fence_merge); + +static void sync_fence_signal_pt(struct sync_pt *pt) +{ + LIST_HEAD(signaled_waiters); + struct sync_fence *fence = pt->fence; + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int status; + + status = sync_fence_get_status(fence); + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * this should protect against two threads racing on the signaled + * false -> true transition + */ + if (status && !fence->status) { + list_for_each_safe(pos, n, &fence->waiter_list_head) + list_move(pos, &signaled_waiters); + + fence->status = status; + } else { + status = 0; + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + if (status) { + list_for_each_safe(pos, n, &signaled_waiters) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + list_del(pos); + waiter->callback(fence, waiter); + } + wake_up(&fence->wq); + } +} + +int sync_fence_wait_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + + if (fence->status) { + err = fence->status; + goto out; + } + + list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); +out: + 
spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + + return err; +} +EXPORT_SYMBOL(sync_fence_wait_async); + +int sync_fence_cancel_async(struct sync_fence *fence, + struct sync_fence_waiter *waiter) +{ + struct list_head *pos; + struct list_head *n; + unsigned long flags; + int ret = -ENOENT; + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + /* + * Make sure waiter is still in waiter_list because it is possible for + * the waiter to be removed from the list while the callback is still + * pending. + */ + list_for_each_safe(pos, n, &fence->waiter_list_head) { + struct sync_fence_waiter *list_waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + if (list_waiter == waiter) { + list_del(pos); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + return ret; +} +EXPORT_SYMBOL(sync_fence_cancel_async); + +static bool sync_fence_check(struct sync_fence *fence) +{ + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + return fence->status != 0; +} + +int sync_fence_wait(struct sync_fence *fence, long timeout) +{ + int err = 0; + struct sync_pt *pt; + + trace_sync_wait(fence, 1); + list_for_each_entry(pt, &fence->pt_list_head, pt_list) + trace_sync_pt(pt); + + if (timeout > 0) { + timeout = msecs_to_jiffies(timeout); + err = wait_event_interruptible_timeout(fence->wq, + sync_fence_check(fence), + timeout); + } else if (timeout < 0) { + err = wait_event_interruptible(fence->wq, + sync_fence_check(fence)); + } + trace_sync_wait(fence, 0); + + if (err < 0) + return err; + + if (fence->status < 0) { + pr_info("fence error %d on [%p]\n", fence->status, fence); + sync_dump(); + return fence->status; + } + + if (fence->status == 0) { + pr_info("fence timeout on [%p] after %dms\n", fence, + jiffies_to_msecs(timeout)); + sync_dump(); + return -ETIME; + } + + return 0; +} +EXPORT_SYMBOL(sync_fence_wait); + +static void sync_fence_free(struct kref *kref) +{ + struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + + sync_fence_free_pts(fence); + + kfree(fence); +} + +static int sync_fence_release(struct inode *inode, struct file *file) +{ + struct sync_fence *fence = file->private_data; + unsigned long flags; + + /* + * We need to remove all ways to access this fence before droping + * our ref. + * + * start with its membership in the global fence list + */ + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_del(&fence->sync_fence_list); + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + + /* + * remove its pts from their parents so that sync_timeline_signal() + * can't reference the fence. 
+ */ + sync_fence_detach_pts(fence); + + kref_put(&fence->kref, sync_fence_free); + + return 0; +} + +static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +{ + struct sync_fence *fence = file->private_data; + + poll_wait(file, &fence->wq, wait); + + /* + * Make sure that reads to fence->status are ordered with the + * wait queue event triggering + */ + smp_rmb(); + + if (fence->status == 1) + return POLLIN; + else if (fence->status < 0) + return POLLERR; + else + return 0; +} + +static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) +{ + __s32 value; + + if (copy_from_user(&value, (void __user *)arg, sizeof(value))) + return -EFAULT; + + return sync_fence_wait(fence, value); +} + +static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +{ + int fd = get_unused_fd(); + int err; + struct sync_fence *fence2, *fence3; + struct sync_merge_data data; + + if (fd < 0) + return fd; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { + err = -EFAULT; + goto err_put_fd; + } + + fence2 = sync_fence_fdget(data.fd2); + if (fence2 == NULL) { + err = -ENOENT; + goto err_put_fd; + } + + data.name[sizeof(data.name) - 1] = '\0'; + fence3 = sync_fence_merge(data.name, fence, fence2); + if (fence3 == NULL) { + err = -ENOMEM; + goto err_put_fence2; + } + + data.fence = fd; + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + err = -EFAULT; + goto err_put_fence3; + } + + sync_fence_install(fence3, fd); + sync_fence_put(fence2); + return 0; + +err_put_fence3: + sync_fence_put(fence3); + +err_put_fence2: + sync_fence_put(fence2); + +err_put_fd: + put_unused_fd(fd); + return err; +} + +static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +{ + struct sync_pt_info *info = data; + int ret; + + if (size < sizeof(struct sync_pt_info)) + return -ENOMEM; + + info->len = sizeof(struct sync_pt_info); + + if (pt->parent->ops->fill_driver_data) { + ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, + size - sizeof(*info)); + if (ret < 0) + return ret; + + info->len += ret; + } + + strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); + strlcpy(info->driver_name, pt->parent->ops->driver_name, + sizeof(info->driver_name)); + info->status = pt->status; + info->timestamp_ns = ktime_to_ns(pt->timestamp); + + return info->len; +} + +static long sync_fence_ioctl_fence_info(struct sync_fence *fence, + unsigned long arg) +{ + struct sync_fence_info_data *data; + struct list_head *pos; + __u32 size; + __u32 len = 0; + int ret; + + if (copy_from_user(&size, (void __user *)arg, sizeof(size))) + return -EFAULT; + + if (size < sizeof(struct sync_fence_info_data)) + return -EINVAL; + + if (size > 4096) + size = 4096; + + data = kzalloc(size, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + strlcpy(data->name, fence->name, sizeof(data->name)); + data->status = fence->status; + len = sizeof(struct sync_fence_info_data); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + + ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + + if (ret < 0) + goto out; + + len += ret; + } + + data->len = len; + + if (copy_to_user((void __user *)arg, data, len)) + ret = -EFAULT; + else + ret = 0; + +out: + kfree(data); + + return ret; +} + +static long sync_fence_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct sync_fence *fence = file->private_data; + switch (cmd) { + case SYNC_IOC_WAIT: + return 
sync_fence_ioctl_wait(fence, arg); + + case SYNC_IOC_MERGE: + return sync_fence_ioctl_merge(fence, arg); + + case SYNC_IOC_FENCE_INFO: + return sync_fence_ioctl_fence_info(fence, arg); + + default: + return -ENOTTY; + } +} + +#ifdef CONFIG_DEBUG_FS +static const char *sync_status_str(int status) +{ + if (status > 0) + return "signaled"; + else if (status == 0) + return "active"; + else + return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ + int status = pt->status; + seq_printf(s, " %s%spt %s", + fence ? pt->parent->name : "", + fence ? "_" : "", + sync_status_str(status)); + if (pt->status) { + struct timeval tv = ktime_to_timeval(pt->timestamp); + seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); + } + + if (pt->parent->ops->timeline_value_str && + pt->parent->ops->pt_value_str) { + char value[64]; + pt->parent->ops->pt_value_str(pt, value, sizeof(value)); + seq_printf(s, ": %s", value); + if (fence) { + pt->parent->ops->timeline_value_str(pt->parent, value, + sizeof(value)); + seq_printf(s, " / %s", value); + } + } else if (pt->parent->ops->print_pt) { + seq_printf(s, ": "); + pt->parent->ops->print_pt(s, pt); + } + + seq_printf(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + + if (obj->ops->timeline_value_str) { + char value[64]; + obj->ops->timeline_value_str(obj, value, sizeof(value)); + seq_printf(s, ": %s", value); + } else if (obj->ops->print_obj) { + seq_printf(s, ": "); + obj->ops->print_obj(s, obj); + } + + seq_printf(s, "\n"); + + spin_lock_irqsave(&obj->child_list_lock, flags); + list_for_each(pos, &obj->child_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, child_list); + sync_print_pt(s, pt, false); + } + spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ + struct list_head *pos; + unsigned long flags; + + seq_printf(s, "[%p] %s: %s\n", fence, fence->name, + sync_status_str(fence->status)); + + list_for_each(pos, &fence->pt_list_head) { + struct sync_pt *pt = + container_of(pos, struct sync_pt, pt_list); + sync_print_pt(s, pt, true); + } + + spin_lock_irqsave(&fence->waiter_list_lock, flags); + list_for_each(pos, &fence->waiter_list_head) { + struct sync_fence_waiter *waiter = + container_of(pos, struct sync_fence_waiter, + waiter_list); + + seq_printf(s, "waiter %pF\n", waiter->callback); + } + spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ + unsigned long flags; + struct list_head *pos; + + seq_printf(s, "objs:\n--------------\n"); + + spin_lock_irqsave(&sync_timeline_list_lock, flags); + list_for_each(pos, &sync_timeline_list_head) { + struct sync_timeline *obj = + container_of(pos, struct sync_timeline, + sync_timeline_list); + + sync_print_obj(s, obj); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + + seq_printf(s, "fences:\n--------------\n"); + + spin_lock_irqsave(&sync_fence_list_lock, flags); + list_for_each(pos, &sync_fence_list_head) { + struct sync_fence *fence = + container_of(pos, struct sync_fence, sync_fence_list); + + sync_print_fence(s, fence); + seq_printf(s, "\n"); + } + spin_unlock_irqrestore(&sync_fence_list_lock, flags); + return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ + return 
single_open(file, sync_debugfs_show, inode->i_private); +} + +static const struct file_operations sync_debugfs_fops = { + .open = sync_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static __init int sync_debugfs_init(void) +{ + debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); + return 0; +} +late_initcall(sync_debugfs_init); + +#define DUMP_CHUNK 256 +static char sync_dump_buf[64 * 1024]; +void sync_dump(void) +{ + struct seq_file s = { + .buf = sync_dump_buf, + .size = sizeof(sync_dump_buf) - 1, + }; + int i; + + sync_debugfs_show(&s, NULL); + + for (i = 0; i < s.count; i += DUMP_CHUNK) { + if ((s.count - i) > DUMP_CHUNK) { + char c = s.buf[i + DUMP_CHUNK]; + s.buf[i + DUMP_CHUNK] = 0; + pr_cont("%s", s.buf + i); + s.buf[i + DUMP_CHUNK] = c; + } else { + s.buf[s.count] = 0; + pr_cont("%s", s.buf + i); + } + } +} +#else +static void sync_dump(void) +{ +} +#endif diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index be52344ed19..a9cb23845cf 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -110,9 +110,10 @@ static int bcma_register_cores(struct bcma_bus *bus) static void bcma_unregister_cores(struct bcma_bus *bus) { - struct bcma_device *core; + struct bcma_device *core, *tmp; - list_for_each_entry(core, &bus->cores, list) { + list_for_each_entry_safe(core, tmp, &bus->cores, list) { + list_del(&core->list); if (core->dev_registered) device_unregister(&core->dev); } diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index e086fbbbe85..8db9089127c 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T int TimeoutCounter; int i; - + memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T)); + if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32))) return DAC960_Failure(Controller, "DMA mask out of range"); Controller->BounceBufferLimit = DMA_BIT_MASK(32); @@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) DAC960_Controller_T *Controller = Command->Controller; DAC960_CommandType_T CommandType = Command->CommandType; DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; - DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode; + DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode; DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus; if (CommandType == DAC960_ReadCommand || @@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) { if (Controller->ShutdownMonitoringTimer) return; - if (CommandOpcode == DAC960_V2_GetControllerInfo) + if (IOCTLOpcode == DAC960_V2_GetControllerInfo) { DAC960_V2_ControllerInfo_T *NewControllerInfo = Controller->V2.NewControllerInformation; @@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) memcpy(ControllerInfo, NewControllerInfo, sizeof(DAC960_V2_ControllerInfo_T)); } - else if (CommandOpcode == DAC960_V2_GetEvent) + else if (IOCTLOpcode == DAC960_V2_GetEvent) { if (CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_ReportEvent(Controller, Controller->V2.Event); } Controller->V2.NextEventSequenceNumber++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid && CommandStatus == 
DAC960_V2_NormalCompletion) { DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo = @@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) NewPhysicalDeviceInfo->LogicalUnit++; Controller->V2.PhysicalDeviceIndex++; } - else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) { unsigned int DeviceIndex; for (DeviceIndex = Controller->V2.PhysicalDeviceIndex; @@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) } Controller->V2.NeedPhysicalDeviceInformation = false; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid && + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid && CommandStatus == DAC960_V2_NormalCompletion) { DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo = @@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command) [LogicalDeviceNumber] = true; NewLogicalDeviceInfo->LogicalDeviceNumber++; } - else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid) + else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid) { int LogicalDriveNumber; for (LogicalDriveNumber = 0; diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index db195abad69..e49ddd0aea1 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "47" +#define VERSION "47q" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 528f6318ded..2a0fdaeefcd 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -277,8 +277,6 @@ aoeblk_gdalloc(void *vp) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; - if (bdi_init(&d->blkq->backing_dev_info)) - goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; @@ -299,9 +297,6 @@ aoeblk_gdalloc(void *vp) aoedisk_add_sysfs(d); return; -err_blkq: - blk_cleanup_queue(d->blkq); - d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index de0435e63b0..db30542b122 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -30,11 +30,13 @@ new_skb(ulong len) { struct sk_buff *skb; - skb = alloc_skb(len, GFP_ATOMIC); + skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); if (skb) { + skb_reserve(skb, MAX_HEADER); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); + skb_checksum_none_assert(skb); } return skb; } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index dba1c32e1dd..f5dfb8498c3 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; + page->index = idx; if (radix_tree_insert(&brd->brd_pages, idx, page)) { __free_page(page); page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); - } else - page->index = idx; + } spin_unlock(&brd->brd_lock); radix_tree_preload_end(); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index c2f9b3e3dec..1dab802d82b 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1716,7 +1716,7 @@ static int cciss_ioctl(struct block_device *bdev, 
fmode_t mode, case CCISS_BIG_PASSTHRU: return cciss_bigpassthru(h, argp); - /* scsi_cmd_ioctl handles these, below, though some are not */ + /* scsi_cmd_blk_ioctl handles these, below, though some are not */ /* very meaningful for cciss. SG_IO is the main one people want. */ case SG_GET_VERSION_NUM: @@ -1727,9 +1727,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, case SG_EMULATED_HOST: case SG_IO: case SCSI_IOCTL_SEND_COMMAND: - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); + return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); - /* scsi_cmd_ioctl would normally handle these, below, but */ + /* scsi_cmd_blk_ioctl would normally handle these, below, but */ /* they aren't a good fit for cciss, as CD-ROMs are */ /* not supported, and we don't have any bus/target/lun */ /* which we present to the kernel. */ diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 696100241a6..f01b3507e5b 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, { case CMD_TARGET_STATUS: /* Pass it up to the upper layers... */ - if( ei->ScsiStatus) - { -#if 0 - printk(KERN_WARNING "cciss: cmd %p " - "has SCSI Status = %x\n", - c, ei->ScsiStatus); -#endif - cmd->result |= (ei->ScsiStatus << 1); - } - else { /* scsi status is zero??? How??? */ + if (!ei->ScsiStatus) { /* Ordinarily, this case should never happen, but there is a bug in some released firmware revisions that allows it to happen @@ -804,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "%p has protocol error\n", c); break; @@ -866,6 +858,7 @@ cciss_scsi_detect(ctlr_info_t *h) sh->can_queue = cciss_tape_cmds; sh->sg_tablesize = h->maxsgentries; sh->max_cmd_len = MAX_COMMAND_SIZE; + sh->max_sectors = h->cciss_max_sectors; ((struct cciss_scsi_adapter_data_t *) h->scsi_ctlr)->scsi_host = sh; @@ -1410,7 +1403,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, /* track how many SG entries we are using */ if (request_nsgs > h->maxSG) h->maxSG = request_nsgs; - c->Header.SGTotal = (__u8) request_nsgs + chained; + c->Header.SGTotal = (u16) request_nsgs + chained; if (request_nsgs > h->max_cmd_sgentries) c->Header.SGList = h->max_cmd_sgentries; else diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 43beaca5317..13cbdd32895 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) if (hg == -1 && mdev->state.role == R_PRIMARY) { enum drbd_state_rv rv2; - drbd_set_role(mdev, R_SECONDARY, 0); /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. * we do not need to wait for the after state change work either. 
*/ diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3424d675b76..e59f53679d6 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req const int rw = bio_data_dir(bio); int cpu; cpu = part_stat_lock(); + part_round_stats(cpu, &mdev->vdisk->part0); part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]); part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio)); part_inc_in_flight(&mdev->vdisk->part0, rw); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9955a53733b..215ddc8261c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -1032,37 +1032,6 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function) return 0; } -static DEFINE_SPINLOCK(floppy_hlt_lock); -static int hlt_disabled; -static void floppy_disable_hlt(void) -{ - unsigned long flags; - - WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012"); - spin_lock_irqsave(&floppy_hlt_lock, flags); - if (!hlt_disabled) { - hlt_disabled = 1; -#ifdef HAVE_DISABLE_HLT - disable_hlt(); -#endif - } - spin_unlock_irqrestore(&floppy_hlt_lock, flags); -} - -static void floppy_enable_hlt(void) -{ - unsigned long flags; - - spin_lock_irqsave(&floppy_hlt_lock, flags); - if (hlt_disabled) { - hlt_disabled = 0; -#ifdef HAVE_DISABLE_HLT - enable_hlt(); -#endif - } - spin_unlock_irqrestore(&floppy_hlt_lock, flags); -} - static void setup_DMA(void) { unsigned long f; @@ -1107,7 +1076,6 @@ static void setup_DMA(void) fd_enable_dma(); release_dma_lock(f); #endif - floppy_disable_hlt(); } static void show_floppy(void); @@ -1709,7 +1677,6 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id) fd_disable_dma(); release_dma_lock(f); - floppy_enable_hlt(); do_floppy = NULL; if (fdc >= N_FDC || FDCS->address == -1) { /* we don't even know which FDC is the culprit */ @@ -1858,8 +1825,6 @@ static void floppy_shutdown(unsigned long data) show_floppy(); cancel_activity(); - floppy_enable_hlt(); - flags = claim_dma_lock(); fd_disable_dma(); release_dma_lock(flags); @@ -4198,6 +4163,7 @@ static int __init floppy_init(void) disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); if (!disks[dr]->queue) { + put_disk(disks[dr]); err = -ENOMEM; goto out_put_disk; } @@ -4504,7 +4470,6 @@ static void floppy_release_irq_and_dma(void) #if N_FDC > 1 set_dor(1, ~8, 0); #endif - floppy_enable_hlt(); if (floppy_track_buffer && max_buffer_sectors) { tmpsize = max_buffer_sectors * 1024; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 2ebacf0e1ed..38f8da9ab9b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -928,6 +928,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, wake_up_process(lo->lo_thread); if (max_part > 0) ioctl_by_bdev(bdev, BLKRRPART, 0); + + /* Grab the block_device to prevent its destruction after we + * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). 
+ */ + bdgrab(bdev); return 0; out_clr: @@ -1024,8 +1029,10 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); memset(lo->lo_file_name, 0, LO_NAME_SIZE); - if (bdev) + if (bdev) { + bdput(bdev); invalidate_bdev(bdev); + } set_capacity(lo->lo_disk, 0); loop_sysfs_exit(lo); if (bdev) { @@ -1274,11 +1281,9 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev) /* the width of sector_t may be narrow for bit-shift */ sz = sec; sz <<= 9; - mutex_lock(&bdev->bd_mutex); bd_set_size(bdev, sz); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); - mutex_unlock(&bdev->bd_mutex); out: return err; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 48e8fee9f2d..94f6ae2bd0c 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) int op_len, err; void *req_buf; - if (!(((u64)1 << ((u64)op - 1)) & port->operations)) + if (!(((u64)1 << (u64)op) & port->operations)) return -EOPNOTSUPP; switch (op) { diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index b70f0fca9a4..eec7b7a43cb 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -1116,7 +1116,7 @@ static inline void carm_handle_resp(struct carm_host *host, break; case MISC_GET_FW_VER: { struct carm_fw_ver *ver = (struct carm_fw_ver *) - mem + sizeof(struct carm_msg_get_fw_ver); + (mem + sizeof(struct carm_msg_get_fw_ver)); if (!error) { host->fw_ver = le32_to_cpu(ver->version); host->flags |= (ver->features & FL_FW_VER_MASK); diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 0e376d46bdd..7333b9e4441 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode) static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct gendisk *disk = bdev->bd_disk; void __user *usermem = (void __user *) arg; int ret; mutex_lock(&ub_mutex); - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem); mutex_unlock(&ub_mutex); return ret; diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 031ca720d92..afa8463b2be 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -513,6 +513,44 @@ static void process_page(unsigned long data) } } +struct mm_plug_cb { + struct blk_plug_cb cb; + struct cardinfo *card; +}; + +static void mm_unplug(struct blk_plug_cb *cb) +{ + struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); + + spin_lock_irq(&mmcb->card->lock); + activate(mmcb->card); + spin_unlock_irq(&mmcb->card->lock); + kfree(mmcb); +} + +static int mm_check_plugged(struct cardinfo *card) +{ + struct blk_plug *plug = current->plug; + struct mm_plug_cb *mmcb; + + if (!plug) + return 0; + + list_for_each_entry(mmcb, &plug->cb_list, cb.list) { + if (mmcb->cb.callback == mm_unplug && mmcb->card == card) + return 1; + } + /* Not currently on the callback list */ + mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); + if (!mmcb) + return 0; + + mmcb->card = card; + mmcb->cb.callback = mm_unplug; + list_add(&mmcb->cb.list, &plug->cb_list); + return 1; +} + static int mm_make_request(struct request_queue *q, struct bio *bio) { struct cardinfo *card = q->queuedata; @@ -523,6 +561,8 @@ static int mm_make_request(struct request_queue *q, struct 
bio *bio) *card->biotail = bio; bio->bi_next = NULL; card->biotail = &bio->bi_next; + if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card)) + activate(card); spin_unlock_irq(&card->lock); return 0; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 079c08808d8..5d7a9340363 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -236,8 +236,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, - (void __user *)data); + return scsi_cmd_blk_ioctl(bdev, mode, cmd, + (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 54139d0f5fe..92bdc4001f2 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -650,13 +650,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, bio->bi_end_io = end_block_io_op; } - /* - * We set it one so that the last submit_bio does not have to call - * atomic_inc. - */ atomic_set(&pending_req->pendcnt, nbio); - - /* Get a reference count for the disk queue and start sending I/O */ blk_start_plug(&plug); for (i = 0; i < nbio; i++) @@ -684,6 +678,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); + atomic_set(&pending_req->pendcnt, 1); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 6cc0db1bf52..97ded2552d9 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -400,6 +400,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev) be->blkif = NULL; } + kfree(be->mode); kfree(be); dev_set_drvdata(&dev->dev, NULL); return 0; @@ -482,6 +483,7 @@ static void backend_changed(struct xenbus_watch *watch, = container_of(watch, struct backend_info, backend_watch); struct xenbus_device *dev = be->dev; int cdrom = 0; + unsigned long handle; char *device_type; DPRINTK(""); @@ -501,10 +503,10 @@ static void backend_changed(struct xenbus_watch *watch, return; } - if ((be->major || be->minor) && - ((be->major != major) || (be->minor != minor))) { - pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", - be->major, be->minor, major, minor); + if (be->major | be->minor) { + if (be->major != major || be->minor != minor) + pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", + be->major, be->minor, major, minor); return; } @@ -522,36 +524,33 @@ static void backend_changed(struct xenbus_watch *watch, kfree(device_type); } - if (be->major == 0 && be->minor == 0) { - /* Front end dir is a number, which is used as the handle. */ - - char *p = strrchr(dev->otherend, '/') + 1; - long handle; - err = strict_strtoul(p, 0, &handle); - if (err) - return; + /* Front end dir is a number, which is used as the handle. 
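Illustrative sketch, not part of the patch: the xen-blkback backend_changed() hunk above now derives the VBD handle by parsing the trailing numeric component of the frontend directory with strict_strtoul(). The parsing idiom, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Parse the trailing "<number>" component of a path such as ".../vbd/51712". */
static int mydrv_path_to_handle(const char *otherend, unsigned long *handle)
{
        const char *p = strrchr(otherend, '/');

        if (!p)
                return -EINVAL;
        /* strict_strtoul() returns 0 on success and fills *handle. */
        return strict_strtoul(p + 1, 0, handle);
}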
*/ + err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle); + if (err) + return; - be->major = major; - be->minor = minor; + be->major = major; + be->minor = minor; - err = xen_vbd_create(be->blkif, handle, major, minor, - (NULL == strchr(be->mode, 'w')), cdrom); - if (err) { - be->major = 0; - be->minor = 0; - xenbus_dev_fatal(dev, err, "creating vbd structure"); - return; - } + err = xen_vbd_create(be->blkif, handle, major, minor, + !strchr(be->mode, 'w'), cdrom); + if (err) + xenbus_dev_fatal(dev, err, "creating vbd structure"); + else { err = xenvbd_sysfs_addif(dev); if (err) { xen_vbd_free(&be->blkif->vbd); - be->major = 0; - be->minor = 0; xenbus_dev_fatal(dev, err, "creating sysfs entries"); - return; } + } + if (err) { + kfree(be->mode); + be->mode = NULL; + be->major = 0; + be->minor = 0; + } else { /* We're potentially connected now */ xen_update_blkif_status(be->blkif); } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 695d4414bd4..e4e61907fc1 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -62,16 +62,28 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0CF3, 0x3002) }, + { USB_DEVICE(0x13d3, 0x3304) }, + { USB_DEVICE(0x0930, 0x0215) }, + { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, + { USB_DEVICE(0x0CF3, 0x311D) }, + { USB_DEVICE(0x0CF3, 0x817a) }, + { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x04CA, 0x3005) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, + /* Atheros AR5BBU22 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE03C) }, + { } /* Terminating entry */ }; @@ -83,7 +95,15 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU22 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 14776e5049c..8a039f1d338 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -59,6 +59,12 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, + + /* Broadcom SoftSailing reporting vendor specific */ + { USB_DEVICE(0x0a5c, 0x21e1) }, + /* Apple MacBookPro 7,1 */ { USB_DEVICE(0x05ac, 0x8213) }, @@ -90,6 +96,16 @@ static struct usb_device_id btusb_table[] = { /* Canyon CN-BTU1 with HID interfaces */ { USB_DEVICE(0x0c10, 0x0000) }, + /* Broadcom BCM20702A0 */ + { USB_DEVICE(0x0489, 0xe042) }, + { USB_DEVICE(0x413c, 0x8197) }, + + /* Foxconn - Hon Hai */ + { USB_DEVICE(0x0489, 0xe033) }, + + /*Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + { } /* Terminating entry */ }; @@ 
-104,16 +120,28 @@ static struct usb_device_id blacklist_table[] = { /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -485,15 +513,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); - urb->dev = data->udev; - urb->pipe = pipe; - urb->context = hdev; - urb->complete = btusb_isoc_complete; - urb->interval = data->isoc_rx_ep->bInterval; + usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, + hdev, data->isoc_rx_ep->bInterval); urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; - urb->transfer_buffer = buf; - urb->transfer_buffer_length = size; __fill_isoc_descriptor(urb, size, le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index b07af022ca9..d34ec1c240d 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -237,7 +237,6 @@ static void hci_uart_destruct(struct hci_dev *hdev) return; BT_DBG("%s", hdev->name); - kfree(hdev->driver_data); } /* ------ LDISC part ------ */ @@ -310,12 +309,13 @@ static void hci_uart_tty_close(struct tty_struct *tty) hci_uart_close(hdev); if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { - hu->proto->close(hu); if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); } + hu->proto->close(hu); } + kfree(hu); } } diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 75fb965b8f7..cc6471aa9f4 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2114,11 +2114,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, if (!nr) return -ENOMEM; - if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) { - ret = -EFAULT; - goto out; - } - cgc.data_direction = CGC_DATA_READ; while (nframes > 0) { if (nr > nframes) @@ -2127,7 +2122,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW); if (ret) break; - if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { + if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { ret = -EFAULT; break; } @@ -2135,7 +2130,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 
__user *ubuf, nframes -= nr; lba += nr; } -out: kfree(cgc.buffer); return ret; } @@ -2741,12 +2735,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, { void __user *argp = (void __user *)arg; int ret; - struct gendisk *disk = bdev->bd_disk; /* * Try the generic SCSI command ioctl's first. */ - ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); if (ret != -ENOTTY) return ret; diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index b427711be4b..58b49d1a283 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -897,6 +897,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 5da67f165af..6f246049d5b 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -211,6 +211,7 @@ #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 +#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069 #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig index bb8ee2e3a82..53df29b7611 100644 --- a/drivers/char/diag/Kconfig +++ b/drivers/char/diag/Kconfig @@ -18,15 +18,6 @@ config DIAG_OVER_USB default y help This feature helps segregate code required for DIAG traffic to go over USB. - -config MODEM_DIAG_MASTER - bool "Set if Modem is to be the master on DIAG" - depends on ARCH_MSM - default n - help - Diag master: Android just forwards the Diag packet to modem. Modem will remove HDLC by itself. - Diag slave: Android kernel should remove HDLC from Diag packet before send to modem. - In the latest modem codebase, it was n by default. endmenu menu "SDIO support for DIAG" @@ -38,3 +29,13 @@ config DIAG_SDIO_PIPE help SDIO Transport Layer for DIAG Router endmenu + +menu "HSIC support for DIAG" + +config DIAG_HSIC_PIPE + depends on USB_QCOM_DIAG_BRIDGE + default y + bool "Enable 9K DIAG traffic over HSIC" + help + HSIC Transport Layer for DIAG Router +endmenu diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile index 52ab2b94e1d..c62b7fdd30a 100644 --- a/drivers/char/diag/Makefile +++ b/drivers/char/diag/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_DIAG_CHAR) := diagchar.o obj-$(CONFIG_DIAG_SDIO_PIPE) += diagfwd_sdio.o +obj-$(CONFIG_DIAG_HSIC_PIPE) += diagfwd_hsic.o diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagmem.o diagfwd_cntl.o diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index f96c724bf3d..7025ea8ebca 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. 
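Illustrative sketch, not part of the patch: the cdrom_read_cdda_old() hunk above drops the up-front access_ok()/__copy_to_user() pair in favour of plain copy_to_user(), which performs its own access checking and returns the number of bytes it could not copy. A minimal version of that idiom, with hypothetical function and buffer names:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int mydrv_copy_chunk(void __user *ubuf, const void *kbuf, size_t len)
{
        /* copy_to_user() validates the user pointer itself, so no separate
         * access_ok() call is needed; a non-zero return means some bytes
         * were left uncopied. */
        if (copy_to_user(ubuf, kbuf, len))
                return -EFAULT;
        return 0;
}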
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,14 +17,13 @@ #include #include #include -#include #include #include #include -#include #include /* Size of the USB buffers used for read and write*/ #define USB_MAX_OUT_BUF 4096 +#define APPS_BUF_SIZE 2000 #define IN_BUF_SIZE 16384 #define MAX_IN_BUF_SIZE 32768 #define MAX_SYNC_OBJ_NAME_SIZE 32 @@ -41,20 +40,24 @@ #define APPS_DATA 3 #define SDIO_DATA 4 #define WCNSS_DATA 5 +#define HSIC_DATA 6 #define MODEM_PROC 0 #define APPS_PROC 1 #define QDSP_PROC 2 #define WCNSS_PROC 3 -#define MSG_MASK_SIZE 8000 +#define MSG_MASK_SIZE 9500 #define LOG_MASK_SIZE 8000 #define EVENT_MASK_SIZE 1000 #define USER_SPACE_DATA 8000 #define PKT_SIZE 4096 -#define MAX_EQUIP_ID 12 +#define MAX_EQUIP_ID 15 +#define DIAG_CTRL_MSG_LOG_MASK 9 +#define DIAG_CTRL_MSG_EVENT_MASK 10 +#define DIAG_CTRL_MSG_F3_MASK 11 /* Maximum number of pkt reg supported at initialization*/ -extern unsigned int diag_max_registration; -extern unsigned int diag_threshold_registration; +extern unsigned int diag_max_reg; +extern unsigned int diag_threshold_reg; #define APPEND_DEBUG(ch) \ do { \ @@ -130,11 +133,7 @@ struct diagchar_dev { int *data_ready; int num_clients; struct diag_write_device *buf_tbl; - spinlock_t diagchar_lock; -#ifdef CONFIG_DIAG_SDIO_PIPE - struct cdev *cdev_mdm; - int num_mdmclients; -#endif + /* Memory pool parameters */ unsigned int itemsize; unsigned int poolsize; @@ -152,7 +151,11 @@ struct diagchar_dev { int count_hdlc_pool; int count_write_struct_pool; int used; - + /* Buffers for masks */ + struct mutex diag_cntl_mutex; + struct diag_ctrl_event_mask *event_mask; + struct diag_ctrl_log_mask *log_mask; + struct diag_ctrl_msg_mask *msg_mask; /* State for diag forwarding */ unsigned char *buf_in_1; unsigned char *buf_in_2; @@ -162,13 +165,13 @@ struct diagchar_dev { unsigned char *buf_in_qdsp_cntl; unsigned char *buf_in_wcnss; unsigned char *buf_in_wcnss_cntl; - struct mutex diagcharmdm_mutex; - wait_queue_head_t mdmwait_q; - struct diag_client_map *mdmclient_map; - int *mdmdata_ready; unsigned char *usb_buf_out; unsigned char *apps_rsp_buf; unsigned char *user_space_data; + /* buffer for updating mask to peripherals */ + unsigned char *buf_msg_mask_update; + unsigned char *buf_log_mask_update; + unsigned char *buf_event_mask_update; smd_channel_t *ch; smd_channel_t *ch_cntl; smd_channel_t *chqdsp; @@ -191,7 +194,6 @@ struct diagchar_dev { struct work_struct diag_read_work; #endif struct workqueue_struct *diag_wq; - struct wake_lock wake_lock; struct work_struct diag_drain_work; struct work_struct diag_read_smd_work; struct work_struct diag_read_smd_cntl_work; @@ -199,6 +201,10 @@ struct diagchar_dev { struct work_struct diag_read_smd_qdsp_cntl_work; struct work_struct diag_read_smd_wcnss_work; struct work_struct diag_read_smd_wcnss_cntl_work; + struct workqueue_struct *diag_cntl_wq; + struct work_struct diag_modem_mask_update_work; + struct work_struct diag_qdsp_mask_update_work; + struct work_struct diag_wcnss_mask_update_work; uint8_t *msg_masks; uint8_t *log_masks; int log_masks_length; @@ -214,45 +220,43 @@ struct diagchar_dev { struct diag_request *write_ptr_qdsp_2; struct diag_request *write_ptr_wcnss; int logging_mode; + int mask_check; int logging_process_id; -#if DIAG_XPST - unsigned char nohdlc; - unsigned char in_busy_dmrounter; - struct mutex smd_lock; - unsigned char init_done; - unsigned char is2ARM11; -#endif #ifdef CONFIG_DIAG_SDIO_PIPE - unsigned 
char *buf_in_sdio_1; - unsigned char *buf_in_sdio_2; + unsigned char *buf_in_sdio; unsigned char *usb_buf_mdm_out; struct sdio_channel *sdio_ch; int read_len_mdm; - int in_busy_sdio_1; - int in_busy_sdio_2; + int in_busy_sdio; struct usb_diag_ch *mdm_ch; struct work_struct diag_read_mdm_work; struct workqueue_struct *diag_sdio_wq; struct work_struct diag_read_sdio_work; - struct work_struct diag_remove_sdio_work; + struct work_struct diag_close_sdio_work; struct diag_request *usb_read_mdm_ptr; - struct diag_request *write_ptr_mdm_1; - struct diag_request *write_ptr_mdm_2; + struct diag_request *write_ptr_mdm; +#endif +#ifdef CONFIG_DIAG_HSIC_PIPE + unsigned char *buf_in_hsic; + unsigned char *usb_buf_mdm_out; + int hsic_initialized; + int hsic_ch; + int hsic_device_enabled; + int hsic_device_opened; + int read_len_mdm; + int in_busy_hsic_read_on_mdm; + int in_busy_hsic_write_on_mdm; + int in_busy_hsic_write; + int in_busy_hsic_read; + int usb_mdm_connected; + struct usb_diag_ch *mdm_ch; + struct workqueue_struct *diag_hsic_wq; + struct work_struct diag_read_mdm_work; + struct work_struct diag_read_hsic_work; + struct diag_request *usb_read_mdm_ptr; + struct diag_request *write_ptr_mdm; #endif - u64 diag_smd_count; /* from smd */ - u64 diag_qdsp_count; /* from qdsp */ - void (*enable_sd_log)(unsigned int enable); - int qxdm2sd_drop; }; -#define EPST_FUN 1 -#define HPST_FUN 0 - -#if defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) || defined(CONFIG_ARCH_MSM7X27A) -#define SMDDIAG_NAME "DIAG" -#else -#define SMDDIAG_NAME "SMD_DIAG" -#endif extern struct diagchar_dev *driver; -extern int is_wcnss_used; #endif diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 0126f28dc08..bbed3299cba 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. 
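Illustrative sketch, not part of the patch: the diagchar.h changes above add a dedicated control workqueue plus per-peripheral mask-update work items, which later hunks queue when masks change. The general INIT_WORK()/queue_work() pattern these fields rely on, sketched with hypothetical names:

#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_cntl_wq;
static struct work_struct mydrv_mask_update_work;

static void mydrv_mask_update_fn(struct work_struct *work)
{
        /* Push the updated masks to the peripheral outside the caller's context. */
}

static int mydrv_init(void)
{
        mydrv_cntl_wq = create_singlethread_workqueue("mydrv_cntl_wq");
        if (!mydrv_cntl_wq)
                return -ENOMEM;
        INIT_WORK(&mydrv_mask_update_work, mydrv_mask_update_fn);
        return 0;
}

static void mydrv_notify_mask_change(void)
{
        /* Defer the actual update to the dedicated workqueue. */
        queue_work(mydrv_cntl_wq, &mydrv_mask_update_work);
}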
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -30,15 +30,11 @@ #include "diagfwd_cntl.h" #ifdef CONFIG_DIAG_SDIO_PIPE #include "diagfwd_sdio.h" -static unsigned char *buf_9k; #endif -#include - -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG -int diag_support_mdm9k = 1; -#else -int diag_support_mdm9k = 0; +#ifdef CONFIG_DIAG_HSIC_PIPE +#include "diagfwd_hsic.h" #endif +#include MODULE_DESCRIPTION("Diag Char Driver"); MODULE_LICENSE("GPL v2"); @@ -52,7 +48,7 @@ struct diagchar_priv { }; /* The following variables can be specified by module options */ /* for copy buffer */ -static unsigned int itemsize = 2048; /*Size of item in the mempool */ +static unsigned int itemsize = 4096; /*Size of item in the mempool */ static unsigned int poolsize = 10; /*Number of items in the mempool */ /* for hdlc buffer */ static unsigned int itemsize_hdlc = 8192; /*Size of item in the mempool */ @@ -64,8 +60,8 @@ static unsigned int poolsize_write_struct = 8; /* Num of items in the mempool */ static unsigned int max_clients = 15; static unsigned int threshold_client_limit = 30; /* This is the maximum number of pkt registrations supported at initialization*/ -unsigned int diag_max_registration = 500; -unsigned int diag_threshold_registration = 650; +unsigned int diag_max_reg = 600; +unsigned int diag_threshold_reg = 750; /* Timer variables */ static struct timer_list drain_timer; @@ -75,96 +71,6 @@ module_param(itemsize, uint, 0); module_param(poolsize, uint, 0); module_param(max_clients, uint, 0); -static unsigned s, entries_once = 50; -static ssize_t show_diag_registration(struct device *dev, - struct device_attribute *attr, char *buf) -{ - uint16_t i, p = 0, e; - e = s + entries_once; - e = (e > diag_max_registration)?diag_max_registration:e; - - p += sprintf(buf+p, "Registration(%d) #%d -> #%d\n", - diag_max_registration, s, e - 1); - - for (i = s; i < e ; i++) { - p += sprintf(buf+p, "#%03d cmd_code: 0x%02x, subsys_id: 0x%02x ", i, - driver->table[i].cmd_code, driver->table[i].subsys_id); - if (driver->table[i].client_id == APPS_PROC) - p += sprintf(buf+p, "APPS_PROC(%d)\n", - driver->table[i].process_id); - else if (driver->table[i].client_id == MODEM_PROC) - p += sprintf(buf+p, "MODEM_PROC\n"); - else if (driver->table[i].client_id == QDSP_PROC) - p += sprintf(buf+p, "QDSP_PROC\n"); - else if (driver->table[i].client_id == WCNSS_PROC) - p += sprintf(buf+p, "WCNSS_PROC\n"); - else - p += sprintf(buf+p, "UNKNOWN SOURCE\n"); - } - - return p; - -} - -static ssize_t store_registration_index(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - if (u > diag_max_registration) - return 0; - s = u; - return count; -} - -unsigned diag7k_debug_mask = DIAGLOG_MODE_NONE; -unsigned diag9k_debug_mask = DIAGLOG_MODE_NONE; -static ssize_t show_diag_debug_mask(struct device *dev, - struct device_attribute *attr, char *buf) -{ - uint16_t p = 0; - - p += sprintf(buf+p, "diag7k_debug_mask: %d\n" - "diag9k_debug_mask: %d\n", - diag7k_debug_mask, diag9k_debug_mask); - - return p; -} - -static ssize_t store_diag7k_debug_mask(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - diag7k_debug_mask = u; - return count; -} - -static ssize_t 
store_diag9k_debug_mask(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int ret; - unsigned long u; - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) - return ret; - diag9k_debug_mask = u; - return count; -} - -static DEVICE_ATTR(diag_reg_table, 0664, - show_diag_registration, store_registration_index); -static DEVICE_ATTR(diag7k_debug_mask, 0664, - show_diag_debug_mask, store_diag7k_debug_mask); -static DEVICE_ATTR(diag9k_debug_mask, 0664, - show_diag_debug_mask, store_diag9k_debug_mask); - /* delayed_rsp_id 0 represents no delay in the response. Any other number means that the diag packet has a delayed response. */ static uint16_t delayed_rsp_id = 1; @@ -177,9 +83,8 @@ static uint16_t delayed_rsp_id = 1; #define COPY_USER_SPACE_OR_EXIT(buf, data, length) \ do { \ - if (count < ret+length) \ - goto exit; \ - if (copy_to_user(buf, (void *)&data, length)) { \ + if ((count < ret+length) || (copy_to_user(buf, \ + (void *)&data, length))) { \ ret = -EFAULT; \ goto exit; \ } \ @@ -248,8 +153,6 @@ static int diagchar_open(struct inode *inode, struct file *file) { int i = 0; void *temp; - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); if (driver) { mutex_lock(&driver->diagchar_mutex); @@ -281,15 +184,16 @@ static int diagchar_open(struct inode *inode, struct file *file) } else { mutex_unlock(&driver->diagchar_mutex); pr_alert("Max client limit for DIAG reached\n"); - DIAG_INFO("Cannot open handle %s" + pr_info("Cannot open handle %s" " %d", current->comm, current->tgid); for (i = 0; i < driver->num_clients; i++) - DIAG_WARNING("%d) %s PID=%d", i, driver-> + pr_debug("%d) %s PID=%d", i, driver-> client_map[i].name, driver->client_map[i].pid); return -ENOMEM; } } + driver->data_ready[i] = 0x0; driver->data_ready[i] |= MSG_MASKS_TYPE; driver->data_ready[i] |= EVENT_MASKS_TYPE; driver->data_ready[i] |= LOG_MASKS_TYPE; @@ -319,19 +223,19 @@ static int diagchar_close(struct inode *inode, struct file *file) return -ENOMEM; } - if (driver) { #ifdef CONFIG_DIAG_OVER_USB - /* If the SD logging process exits, change logging to USB mode */ - if (driver->logging_process_id == current->tgid) { - driver->logging_mode = USB_MODE; - diagfwd_connect(); - } + /* If the SD logging process exits, change logging to USB mode */ + if (driver->logging_process_id == current->tgid) { + driver->logging_mode = USB_MODE; + diagfwd_connect(); + } #endif /* DIAG over USB */ - /* Delete the pkt response table entry for the exiting process */ - for (i = 0; i < diag_max_registration; i++) - if (driver->table[i].process_id == current->tgid) - driver->table[i].process_id = 0; + /* Delete the pkt response table entry for the exiting process */ + for (i = 0; i < diag_max_reg; i++) + if (driver->table[i].process_id == current->tgid) + driver->table[i].process_id = 0; + if (driver) { mutex_lock(&driver->diagchar_mutex); driver->ref_count--; /* On Client exit, try to destroy all 3 pools */ @@ -357,7 +261,7 @@ void diag_clear_reg(int proc_num) { int i; - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { if (driver->table[i].client_id == proc_num) { driver->table[i].process_id = 0; } @@ -389,14 +293,11 @@ long diagchar_ioctl(struct file *filp, int success = -1; void *temp_buf; - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - if (iocmd == DIAG_IOCTL_COMMAND_REG) { struct bindpkt_params_per_process *pkt_params = (struct 
bindpkt_params_per_process *) ioarg; mutex_lock(&driver->diagchar_mutex); - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { if (driver->table[i].process_id == 0) { diag_add_reg(i, pkt_params->params, &success, &count_entries); @@ -408,19 +309,20 @@ long diagchar_ioctl(struct file *filp, } } } - if (i < diag_threshold_registration) { + if (i < diag_threshold_reg) { /* Increase table size by amount required */ - diag_max_registration += pkt_params->count - + diag_max_reg += pkt_params->count - count_entries; /* Make sure size doesnt go beyond threshold */ - if (diag_max_registration > diag_threshold_registration) - diag_max_registration = - diag_threshold_registration; + if (diag_max_reg > diag_threshold_reg) { + diag_max_reg = diag_threshold_reg; + pr_info("diag: best case memory allocation\n"); + } temp_buf = krealloc(driver->table, - diag_max_registration*sizeof(struct + diag_max_reg*sizeof(struct diag_master_table), GFP_KERNEL); if (!temp_buf) { - diag_max_registration -= pkt_params->count - + diag_max_reg -= pkt_params->count - count_entries; pr_alert("diag: Insufficient memory for reg."); mutex_unlock(&driver->diagchar_mutex); @@ -428,7 +330,7 @@ long diagchar_ioctl(struct file *filp, } else { driver->table = temp_buf; } - for (j = i; j < diag_max_registration; j++) { + for (j = i; j < diag_max_reg; j++) { diag_add_reg(j, pkt_params->params, &success, &count_entries); if (pkt_params->count > count_entries) { @@ -438,6 +340,7 @@ long diagchar_ioctl(struct file *filp, return success; } } + mutex_unlock(&driver->diagchar_mutex); } else { mutex_unlock(&driver->diagchar_mutex); pr_err("Max size reached, Pkt Registration failed for" @@ -469,8 +372,12 @@ long diagchar_ioctl(struct file *filp, mutex_lock(&driver->diagchar_mutex); temp = driver->logging_mode; driver->logging_mode = (int)ioarg; - if (driver->logging_mode == UART_MODE) + if (driver->logging_mode == MEMORY_DEVICE_MODE) + driver->mask_check = 1; + if (driver->logging_mode == UART_MODE) { + driver->mask_check = 0; driver->logging_mode = MEMORY_DEVICE_MODE; + } driver->logging_process_id = current->tgid; mutex_unlock(&driver->diagchar_mutex); if (temp == MEMORY_DEVICE_MODE && driver->logging_mode @@ -480,6 +387,9 @@ long diagchar_ioctl(struct file *filp, driver->in_busy_qdsp_1 = 1; driver->in_busy_qdsp_2 = 1; driver->in_busy_wcnss = 1; +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 1; +#endif } else if (temp == NO_LOGGING_MODE && driver->logging_mode == MEMORY_DEVICE_MODE) { driver->in_busy_1 = 0; @@ -497,6 +407,13 @@ long diagchar_ioctl(struct file *filp, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 0; + /* Poll SDIO channel to check for data */ + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif } #ifdef CONFIG_DIAG_OVER_USB else if (temp == USB_MODE && driver->logging_mode @@ -507,9 +424,7 @@ long diagchar_ioctl(struct file *filp, diagfwd_connect(); else if (temp == USB_MODE && driver->logging_mode == MEMORY_DEVICE_MODE) { - DIAG_INFO("diag: USB disconnected\n"); diagfwd_disconnect(); - DIAG_INFO("sdlogging enable\n"); driver->in_busy_1 = 0; driver->in_busy_2 = 0; driver->in_busy_qdsp_2 = 0; @@ -525,11 +440,16 @@ long diagchar_ioctl(struct file *filp, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); - } else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode - == USB_MODE) { - DIAG_INFO("sdlogging 
disable\n"); - diagfwd_connect(); - } +#ifdef CONFIG_DIAG_SDIO_PIPE + driver->in_busy_sdio = 0; + /* Poll SDIO channel to check for data */ + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif + } else if (temp == MEMORY_DEVICE_MODE && + driver->logging_mode == USB_MODE) + diagfwd_connect(); #endif /* DIAG over USB */ success = 1; } @@ -542,29 +462,17 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, { int index = -1, i = 0, ret = 0; int num_data = 0, data_type; - -#ifdef SDQXDM_DEBUG - struct timeval t; -#endif - for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == current->tgid) index = i; if (index == -1) { - DIAG_ERR("%s:%s(parent:%s): tgid=%d " - "Client PID not found in table\n", __func__, - current->comm, current->parent->comm, current->tgid); - for (i = 0; i < driver->num_clients; i++) - DIAG_ERR("\t#%d: %d\n", i, driver->client_map[i].pid); + pr_err("diag: Client PID not found in table"); return -EINVAL; } wait_event_interruptible(driver->wait_q, driver->data_ready[index]); - if (diag7k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); mutex_lock(&driver->diagchar_mutex); if ((driver->data_ready[index] & USER_SPACE_LOG_TYPE) && (driver-> @@ -672,6 +580,20 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, driver->write_ptr_wcnss->length); driver->in_busy_wcnss = 0; } +#ifdef CONFIG_DIAG_SDIO_PIPE + /* copy 9K data over SDIO */ + if (driver->in_busy_sdio == 1) { + num_data++; + /*Copy the length of data being passed*/ + COPY_USER_SPACE_OR_EXIT(buf+ret, + (driver->write_ptr_mdm->length), 4); + /*Copy the actual data being passed*/ + COPY_USER_SPACE_OR_EXIT(buf+ret, + *(driver->buf_in_sdio), + driver->write_ptr_mdm->length); + driver->in_busy_sdio = 0; + } +#endif /* copy number of data fields */ COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4); ret -= 4; @@ -685,90 +607,17 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, if (driver->ch_wcnss) queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); +#ifdef CONFIG_DIAG_SDIO_PIPE + if (driver->sdio_ch) + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); +#endif APPEND_DEBUG('n'); goto exit; } else if (driver->data_ready[index] & USER_SPACE_LOG_TYPE) { /* In case, the thread wakes up and the logging mode is not memory device any more, the condition needs to be cleared */ driver->data_ready[index] ^= USER_SPACE_LOG_TYPE; - } else if (driver->data_ready[index] & USERMODE_DIAGFWD) { - data_type = USERMODE_DIAGFWD; - driver->data_ready[index] ^= USERMODE_DIAGFWD; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - -#ifdef SDQXDM_DEBUG - do_gettimeofday(&t); - - if (driver->in_busy_1 && t.tv_sec > driver->write_ptr_1->second + 2) - pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n", - t.tv_sec, t.tv_usec/1000, driver->write_ptr_1->second); - if (driver->in_busy_2 && t.tv_sec > driver->write_ptr_2->second + 2) - pr_info("[diag-dbg] late pkt now: %ld.%04ld pkt: %d\n", - t.tv_sec, t.tv_usec/1000, driver->write_ptr_2->second); -#endif - for (i = 0; i < driver->poolsize_write_struct; i++) { - if (driver->buf_tbl[i].length > 0) { -#ifdef SDQXDM_DEBUG - if (diag7k_debug_mask) - printk(KERN_INFO "\n WRITING the buf address " - "and length is %x , %d\n", (unsigned int) - (driver->buf_tbl[i].buf), - driver->buf_tbl[i].length); -#endif - if (copy_to_user(buf+ret, (void *)driver-> - buf_tbl[i].buf, 
driver->buf_tbl[i].length)) - break; - - ret += driver->buf_tbl[i].length; - - diagmem_free(driver, (unsigned char *) - (driver->buf_tbl[i].buf), POOL_TYPE_HDLC); - driver->buf_tbl[i].length = 0; - driver->buf_tbl[i].buf = 0; - } - } - - /* copy modem data */ - if (driver->in_busy_1 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - *(driver->buf_in_1), - driver->write_ptr_1->length); - driver->in_busy_1 = 0; - } - if (driver->in_busy_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - *(driver->buf_in_2), - driver->write_ptr_2->length); - driver->in_busy_2 = 0; - } - - /* copy q6 data */ - if (driver->in_busy_qdsp_1 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_qdsp_1), - driver->write_ptr_qdsp_1->length); - driver->in_busy_qdsp_1 = 0; - } - if (driver->in_busy_qdsp_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_qdsp_2), driver-> - write_ptr_qdsp_2->length); - driver->in_busy_qdsp_2 = 0; - } - if (driver->ch) - queue_work(driver->diag_wq, - &(driver->diag_read_smd_work)); - if (driver->chqdsp) - queue_work(driver->diag_wq, - &(driver->diag_read_smd_qdsp_work)); - if (diag7k_debug_mask) - pr_info("%s() return %d byte\n", __func__, ret); - - goto exit; } if (driver->data_ready[index] & DEINIT_TYPE) { @@ -820,9 +669,6 @@ static int diagchar_read(struct file *file, char __user *buf, size_t count, } exit: - if (ret) - wake_lock_timeout(&driver->wake_lock, HZ / 2); - mutex_unlock(&driver->diagchar_mutex); return ret; } @@ -854,8 +700,8 @@ static int diagchar_write(struct file *file, const char __user *buf, err = copy_from_user(driver->user_space_data, buf + 4, payload_size); /* Check masks for On-Device logging */ - if (pkt_type == USER_SPACE_LOG_TYPE) { - if (!mask_request_validate((unsigned char *)buf)) { + if (driver->mask_check) { + if (!mask_request_validate(driver->user_space_data)) { pr_alert("diag: mask request Invalid\n"); return -EFAULT; } @@ -864,17 +710,32 @@ static int diagchar_write(struct file *file, const char __user *buf, #ifdef DIAG_DEBUG pr_debug("diag: user space data %d\n", payload_size); for (i = 0; i < payload_size; i++) - printk(KERN_DEBUG "\t %x", *(((unsigned char *)buf)+i)); + pr_debug("\t %x", *((driver->user_space_data)+i)); +#endif +#ifdef CONFIG_DIAG_SDIO_PIPE + /* send masks to 9k too */ + if (driver->sdio_ch) { + wait_event_interruptible(driver->wait_q, + (sdio_write_avail(driver->sdio_ch) >= + payload_size)); + if (driver->sdio_ch && (payload_size > 0)) { + sdio_write(driver->sdio_ch, (void *) + (driver->user_space_data), payload_size); + } + } #endif + /* send masks to modem now */ diag_process_hdlc((void *)(driver->user_space_data), payload_size); return 0; - } else if (pkt_type == USERMODE_DIAGFWD) { - if (diag7k_debug_mask) - pr_info("%s#%d recv %d bytes\n", __func__, __LINE__, payload_size); - buf += 4; - diag_process_hdlc((void *)buf, payload_size); - return count; + } + + if (payload_size > itemsize) { + pr_err("diag: Dropping packet, packet payload size crosses" + "4KB limit. 
Current payload size %d\n", + payload_size); + driver->dropped_count++; + return -EBADMSG; } buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY); @@ -927,8 +788,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE, POOL_TYPE_HDLC); @@ -957,8 +816,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE, POOL_TYPE_HDLC); @@ -984,8 +841,6 @@ static int diagchar_write(struct file *file, const char __user *buf, goto fail_free_hdlc; } buf_hdlc = NULL; - if (diag7k_debug_mask) - printk(KERN_INFO "\n size written is %d\n", driver->used); driver->used = 0; } @@ -1015,11 +870,11 @@ int mask_request_validate(unsigned char mask_buf[]) uint8_t subsys_id; uint16_t ss_cmd; - packet_id = mask_buf[4]; + packet_id = mask_buf[0]; if (packet_id == 0x4B) { - subsys_id = mask_buf[5]; - ss_cmd = *(uint16_t *)(mask_buf + 6); + subsys_id = mask_buf[1]; + ss_cmd = *(uint16_t *)(mask_buf + 2); /* Packets with SSID which are allowed */ switch (subsys_id) { case 0x04: /* DIAG_SUBSYS_WCDMA */ @@ -1083,315 +938,10 @@ static const struct file_operations diagcharfops = { .release = diagchar_close }; -#ifdef CONFIG_DIAG_SDIO_PIPE -static int diagcharmdm_open(struct inode *inode, struct file *file) -{ - int i = 0; - - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - if (driver) { - mutex_lock(&driver->diagcharmdm_mutex); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == 0) - break; - - if (i < driver->num_mdmclients) { - driver->mdmclient_map[i].pid = current->tgid; - strncpy(driver->mdmclient_map[i].name, current->comm, 20); - driver->mdmclient_map[i].name[19] = '\0'; - } else { - mutex_unlock(&driver->diagcharmdm_mutex); - DIAG_INFO("%s:reach max client count\n", __func__); - for (i = 0; i < driver->num_clients; i++) - DIAG_WARNING("%d) %s PID=%d", i, driver-> - mdmclient_map[i].name, - driver->mdmclient_map[i].pid); - return -ENOMEM; - } - - driver->mdmdata_ready[i] |= MSG_MASKS_TYPE; - driver->mdmdata_ready[i] |= EVENT_MASKS_TYPE; - driver->mdmdata_ready[i] |= LOG_MASKS_TYPE; - - if (driver->ref_count == 0) - diagmem_init(driver); - driver->ref_count++; - - mutex_unlock(&driver->diagcharmdm_mutex); - return 0; - } - - return -ENOMEM; - -} - -static int diagcharmdm_close(struct inode *inode, struct file *file) -{ - - int i = 0; - - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - if (driver) { - mutex_lock(&driver->diagcharmdm_mutex); - - driver->ref_count--; - /* On Client exit, try to destroy all 3 pools */ - diagmem_exit(driver, POOL_TYPE_COPY); - diagmem_exit(driver, POOL_TYPE_HDLC); - diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == current->tgid) { - driver->mdmclient_map[i].pid = 0; - break; - } - - if (i < driver->num_mdmclients) - DIAG_INFO("%s:#%d(%d) %s close\n", __func__, - i, current->tgid, current->comm); - else - DIAG_WARNING("%s: nothing close\n", __func__); - mutex_unlock(&driver->diagcharmdm_mutex); - return 0; - } - - return 
-ENOMEM; -} - -static long diagcharmdm_ioctl(struct file *filp, - unsigned int iocmd, unsigned long ioarg) -{ - int success = -1; - - if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) { - mutex_lock(&driver->diagcharmdm_mutex); - driver->logging_mode = (int)ioarg; - driver->logging_process_id = current->tgid; - mutex_unlock(&driver->diagcharmdm_mutex); - if (driver->logging_mode == MEMORY_DEVICE_MODE) { - DIAG_INFO("diagcharmdm_ioctl enable\n"); - diagfwd_disconnect(); - driver->qxdm2sd_drop = 0; - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - buf_9k = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - - } else if (driver->logging_mode == USB_MODE) { - DIAG_INFO("diagcharmdm_ioctl disable\n"); - diagfwd_connect(); - driver->qxdm2sd_drop = 1; - - kfree(buf_9k); - } - success = 1; - } - return success; -} - -static int diagcharmdm_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - - int index = -1, i = 0, ret = 0; - int num_data = 0, data_type; - - if (diag9k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == current->tgid) - index = i; - - if (index == -1) { - DIAG_ERR("%s:%s(parent:%s): tgid=%d " - "Client PID not found in table\n", __func__, - current->comm, current->parent->comm, current->tgid); - for (i = 0; i < driver->num_mdmclients; i++) - DIAG_ERR("\t#%d: %d\n", i, driver->mdmclient_map[i].pid); - return -EINVAL; - } - - wait_event_interruptible(driver->mdmwait_q, - driver->mdmdata_ready[index]); - - mutex_lock(&driver->diagcharmdm_mutex); - - if ((driver->mdmdata_ready[index] & USER_SPACE_LOG_TYPE) && (driver-> - logging_mode == MEMORY_DEVICE_MODE)) { - /*Copy the type of data being passed*/ - data_type = driver->data_ready[index] & USER_SPACE_LOG_TYPE; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - /* place holder for number of data field */ - ret += 4; - - if (driver->in_busy_sdio_1 == 1) { - - num_data++; - /*Copy the length of data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - (driver->write_ptr_mdm_1->length), 4); - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_1), driver-> - write_ptr_mdm_1->length); - driver->in_busy_sdio_1 = 0; - } - if (driver->in_busy_sdio_2 == 1) { - - num_data++; - /*Copy the length of data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, - (driver->write_ptr_mdm_2->length), 4); - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_2), driver-> - write_ptr_mdm_2->length); - driver->in_busy_sdio_2 = 0; - } - - /* copy number of data fields */ - COPY_USER_SPACE_OR_EXIT(buf+4, num_data, 4); - ret -= 4; - - driver->mdmdata_ready[index] ^= USER_SPACE_LOG_TYPE; - - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - - goto exit; - } else if (driver->mdmdata_ready[index] & USER_SPACE_LOG_TYPE) { - /* In case, the thread wakes up and the logging mode is - not memory device any more, the condition needs to be cleared */ - driver->mdmdata_ready[index] ^= USER_SPACE_LOG_TYPE; - } else if (driver->mdmdata_ready[index] & USERMODE_DIAGFWD) { - data_type = USERMODE_DIAGFWD; - driver->mdmdata_ready[index] ^= USERMODE_DIAGFWD; - COPY_USER_SPACE_OR_EXIT(buf, data_type, 4); - - if (driver->in_busy_sdio_1 == 1) { - /*Copy the actual data being passed*/ - 
COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_1), driver-> - write_ptr_mdm_1->length); - driver->in_busy_sdio_1 = 0; - } - if (driver->in_busy_sdio_2 == 1) { - /*Copy the actual data being passed*/ - COPY_USER_SPACE_OR_EXIT(buf+ret, *(driver-> - buf_in_sdio_2), driver-> - write_ptr_mdm_2->length); - driver->in_busy_sdio_2 = 0; - } - if (driver->sdio_ch) - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - goto exit; - } - - if (driver->mdmdata_ready[index] & DEINIT_TYPE) { - - driver->mdmdata_ready[index] ^= DEINIT_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & MSG_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= MSG_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & EVENT_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= EVENT_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & LOG_MASKS_TYPE) { - - driver->mdmdata_ready[index] ^= LOG_MASKS_TYPE; - goto exit; - } - - if (driver->mdmdata_ready[index] & PKT_TYPE) { - - driver->mdmdata_ready[index] ^= PKT_TYPE; - goto exit; - } -exit: - mutex_unlock(&driver->diagcharmdm_mutex); - - return ret; -} - -static int diagcharmdm_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - - int err, pkt_type; - int payload_size; - - if (diag9k_debug_mask) - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); - - -#ifdef CONFIG_DIAG_OVER_USB - if (((driver->logging_mode == USB_MODE) && (!driver->usb_connected)) || - (driver->logging_mode == NO_LOGGING_MODE)) { - /*Drop the diag payload */ - return -EIO; - } -#endif /* DIAG over USB */ - - /* Get the packet type F3/log/event/Pkt response */ - err = copy_from_user((&pkt_type), buf, 4); - /*First 4 bytes indicate the type of payload - ignore these */ - payload_size = count - 4; - if (pkt_type == USER_SPACE_LOG_TYPE) { - if (diag9k_debug_mask) - DIAGFWD_INFO("writing mask file\n"); - if (!mask_request_validate((unsigned char *)buf)) { - DIAG_ERR("mask request Invalid ..cannot send to modem \n"); - return -EFAULT; - } - buf = buf + 4; - if (driver->sdio_ch) { - memcpy(buf_9k, buf, payload_size); - sdio_write(driver->sdio_ch, buf_9k, payload_size); - } - return count; - } else if (pkt_type == USERMODE_DIAGFWD) { - buf += 4; - if (driver->sdio_ch) { - memcpy(buf_9k, buf, payload_size); - sdio_write(driver->sdio_ch, buf_9k, payload_size); - } - return count; - } - return 0; -} - -static const struct file_operations diagcharmdmfops = { - .owner = THIS_MODULE, - .read = diagcharmdm_read, - .write = diagcharmdm_write, - .unlocked_ioctl = diagcharmdm_ioctl, - .open = diagcharmdm_open, - .release = diagcharmdm_close -}; -#endif - static int diagchar_setup_cdev(dev_t devno) { + int err; - struct device *diagdev; cdev_init(driver->cdev, &diagcharfops); @@ -1412,43 +962,15 @@ static int diagchar_setup_cdev(dev_t devno) return -1; } - diagdev = device_create(driver->diagchar_class, NULL, devno, + device_create(driver->diagchar_class, NULL, devno, (void *)driver, "diag"); - - err = device_create_file(diagdev, &dev_attr_diag_reg_table); - if (err) - DIAG_INFO("dev_attr_diag_reg_table registration failed !\n\n"); - err = device_create_file(diagdev, &dev_attr_diag7k_debug_mask); - if (err) - DIAG_INFO("dev_attr_diag7k_debug_mask registration failed !\n\n"); - err = device_create_file(diagdev, &dev_attr_diag9k_debug_mask); - if (err) - DIAG_INFO("dev_attr_diag9k_debug_mask registration failed !\n\n"); - -#ifdef CONFIG_DIAG_SDIO_PIPE - cdev_init(driver->cdev_mdm, 
&diagcharmdmfops); - - driver->cdev_mdm->owner = THIS_MODULE; - driver->cdev_mdm->ops = &diagcharmdmfops; - - err = cdev_add(driver->cdev_mdm, devno+1, 1); - - if (err) { - DIAG_ERR("diagchar cdev mdm registration failed !\n\n"); - return -1; - } - - device_create(driver->diagchar_class, NULL, devno+1, (void *)driver, "diag_mdm"); -#endif return 0; } static int diagchar_cleanup(void) { - DIAG_INFO("%s:%s(parent:%s): tgid=%d\n", __func__, - current->comm, current->parent->comm, current->tgid); if (driver) { if (driver->cdev) { /* TODO - Check if device exists before deleting */ @@ -1459,7 +981,6 @@ static int diagchar_cleanup(void) } if (!IS_ERR(driver->diagchar_class)) class_destroy(driver->diagchar_class); - wake_lock_destroy(&driver->wake_lock); kfree(driver); } return 0; @@ -1468,7 +989,7 @@ static int diagchar_cleanup(void) #ifdef CONFIG_DIAG_SDIO_PIPE void diag_sdio_fn(int type) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { if (type == INIT) diagfwd_sdio_init(); else if (type == EXIT) @@ -1479,12 +1000,24 @@ void diag_sdio_fn(int type) inline void diag_sdio_fn(int type) {} #endif +#ifdef CONFIG_DIAG_HSIC_PIPE +void diag_hsic_fn(int type) +{ + if (type == INIT) + diagfwd_hsic_init(); + else if (type == EXIT) + diagfwd_hsic_exit(); +} +#else +inline void diag_hsic_fn(int type) {} +#endif + static int __init diagchar_init(void) { dev_t dev; int error; - DIAG_INFO("diagfwd initializing ..\n"); + pr_debug("diagfwd initializing ..\n"); driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL); if (driver) { @@ -1500,10 +1033,9 @@ static int __init diagchar_init(void) driver->poolsize_write_struct = poolsize_write_struct; driver->num_clients = max_clients; driver->logging_mode = USB_MODE; + driver->mask_check = 0; mutex_init(&driver->diagchar_mutex); init_waitqueue_head(&driver->wait_q); - wake_lock_init(&driver->wake_lock, WAKE_LOCK_SUSPEND, "diagchar"); - INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn); INIT_WORK(&(driver->diag_read_smd_work), diag_read_smd_work_fn); INIT_WORK(&(driver->diag_read_smd_cntl_work), @@ -1516,25 +1048,12 @@ static int __init diagchar_init(void) diag_read_smd_wcnss_work_fn); INIT_WORK(&(driver->diag_read_smd_wcnss_cntl_work), diag_read_smd_wcnss_cntl_work_fn); -#ifdef CONFIG_DIAG_SDIO_PIPE - driver->num_mdmclients = 1; - init_waitqueue_head(&driver->mdmwait_q); - spin_lock_init(&driver->diagchar_lock); - mutex_init(&driver->diagcharmdm_mutex); - - driver->num = 2; -#else - driver->num = 1; -#endif diagfwd_init(); - if (chk_config_get_id() == AO8960_TOOLS_ID) { - diagfwd_cntl_init(); - DIAGFWD_INFO("CNTL channel was enabled in the platform\n"); - } else - DIAGFWD_INFO("CNTL channel was not enabled in the platform\n"); - + diagfwd_cntl_init(); diag_sdio_fn(INIT); + diag_hsic_fn(INIT); pr_debug("diagchar initializing ..\n"); + driver->num = 1; driver->name = ((void *)driver) + sizeof(struct diagchar_dev); strlcpy(driver->name, "diag", 4); @@ -1549,10 +1068,6 @@ static int __init diagchar_init(void) goto fail; } driver->cdev = cdev_alloc(); - -#ifdef CONFIG_DIAG_SDIO_PIPE - driver->cdev_mdm = cdev_alloc(); -#endif error = diagchar_setup_cdev(dev); if (error) goto fail; @@ -1561,7 +1076,7 @@ static int __init diagchar_init(void) goto fail; } - DIAG_INFO("diagchar initialized\n"); + pr_info("diagchar initialized now"); return 0; fail: @@ -1569,6 +1084,7 @@ static int __init diagchar_init(void) diagfwd_exit(); diagfwd_cntl_exit(); diag_sdio_fn(EXIT); + diag_hsic_fn(EXIT); return -1; } @@ -1579,9 
+1095,9 @@ static void __exit diagchar_exit(void) ensure no memory leaks */ diagmem_exit(driver, POOL_TYPE_ALL); diagfwd_exit(); - if (chk_config_get_id() == AO8960_TOOLS_ID) - diagfwd_cntl_exit(); + diagfwd_cntl_exit(); diag_sdio_fn(EXIT); + diag_hsic_fn(EXIT); diagchar_cleanup(); printk(KERN_INFO "done diagchar exit\n"); } diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index dd0f0314349..e11afa8d00e 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -9,7 +9,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - #include #include #include @@ -22,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_DIAG_OVER_USB #include #endif @@ -36,22 +36,36 @@ #ifdef CONFIG_DIAG_SDIO_PIPE #include "diagfwd_sdio.h" #endif -#define MODE_CMD 41 -#define RESET_ID 2 +#define MODE_CMD 41 +#define RESET_ID 2 +#define ALL_EQUIP_ID 100 +#define ALL_SSID -1 +#define MAX_SSID_PER_RANGE 100 -int is_wcnss_used; int diag_debug_buf_idx; unsigned char diag_debug_buf[1024]; static unsigned int buf_tbl_size = 8; /*Number of entries in table of buffers */ -int sdio_diag_initialized; -int smd_diag_initialized; -#if DIAG_XPST -static int diag_smd_function_mode; -#endif struct diag_master_table entry; -smd_channel_t *ch_temp; +smd_channel_t *ch_temp, *chqdsp_temp, *ch_wcnss_temp; +int diag_event_num_bytes; +int diag_event_config; struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; +struct mask_info { + int equip_id; + int num_items; + int index; +}; + +#define CREATE_MSG_MASK_TBL_ROW(XX) \ +do { \ + *(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX; \ + msg_mask_tbl_ptr += 4; \ + *(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX ## _LAST; \ + msg_mask_tbl_ptr += 4; \ + /* increment by MAX_SSID_PER_RANGE cells */ \ + msg_mask_tbl_ptr += MAX_SSID_PER_RANGE * sizeof(int); \ +} while (0) #define ENCODE_RSP_AND_SEND(buf_length) \ do { \ @@ -61,7 +75,7 @@ do { \ send.terminate = 1; \ if (!driver->in_busy_1) { \ enc.dest = driver->buf_in_1; \ - enc.dest_last = (void *)(driver->buf_in_1 + 499); \ + enc.dest_last = (void *)(driver->buf_in_1 + APPS_BUF_SIZE - 1);\ diag_hdlc_encode(&send, &enc); \ driver->write_ptr_1->buf = driver->buf_in_1; \ driver->write_ptr_1->length = (int)(enc.dest - \ @@ -69,40 +83,79 @@ do { \ driver->in_busy_1 = 1; \ diag_device_write(driver->buf_in_1, MODEM_DATA, \ driver->write_ptr_1); \ - memset(driver->apps_rsp_buf, '\0', 500); \ + memset(driver->apps_rsp_buf, '\0', APPS_BUF_SIZE); \ } \ } while (0) #define CHK_OVERFLOW(bufStart, start, end, length) \ ((bufStart <= start) && (end - start >= length)) ? 
1 : 0 -int chk_config_get_id() +int chk_config_get_id(void) { + /* For all Fusion targets, Modem will always be present */ + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) + return 0; + switch (socinfo_get_id()) { case APQ8060_MACHINE_ID: case MSM8660_MACHINE_ID: return APQ8060_TOOLS_ID; case AO8960_MACHINE_ID: + case MSM8260A_MACHINE_ID: return AO8960_TOOLS_ID; + case APQ8064_MACHINE_ID: + return APQ8064_TOOLS_ID; + case MSM8930_MACHINE_ID: + return MSM8930_TOOLS_ID; + case MSM8974_MACHINE_ID: + return MSM8974_TOOLS_ID; default: return 0; } } +/* + * This will return TRUE for targets which support apps only mode and hence SSR. + * This applies to 8960 and newer targets. + */ +int chk_apps_only(void) +{ + switch (socinfo_get_id()) { + case AO8960_MACHINE_ID: + case APQ8064_MACHINE_ID: + case MSM8930_MACHINE_ID: + case MSM8630_MACHINE_ID: + case MSM8230_MACHINE_ID: + case APQ8030_MACHINE_ID: + case MSM8627_MACHINE_ID: + case MSM8227_MACHINE_ID: + case MSM8974_MACHINE_ID: + case MDM9615_MACHINE_ID: + case MSM8260A_MACHINE_ID: + return 1; + default: + return 0; + } +} + +/* + * This will return TRUE for targets which support apps as master. + * Thus, SW DLOAD and Mode Reset are supported on apps processor. + * This applies to 8960 and newer targets. + */ +int chk_apps_master(void) +{ + if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm9615()) + return 1; + else + return 0; +} + void __diag_smd_send_req(void) { void *buf = NULL; int *in_busy_ptr = NULL; struct diag_request *write_ptr_modem = NULL; -#if DIAG_XPST - int type; -#endif - -#ifdef SDQXDM_DEBUG - static struct timeval t0 = {0, 0}, t1; - static int full, empty; - long diff; -#endif if (!driver->in_busy_1) { buf = driver->buf_in_1; @@ -135,68 +188,12 @@ void __diag_smd_send_req(void) APPEND_DEBUG('i'); smd_read(driver->ch, buf, r); APPEND_DEBUG('j'); - if (diag7k_debug_mask) { - switch (diag7k_debug_mask) { - case DIAGLOG_MODE_HEAD: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(first 16 bytes)", 16, 1, DUMP_PREFIX_ADDRESS, buf, 16, 1); - break; - case DIAGLOG_MODE_FULL: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(first 16 bytes)", 16, 1, DUMP_PREFIX_ADDRESS, buf, 16, 1); - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem(last 16 bytes) ", 16, 1, DUMP_PREFIX_ADDRESS, buf+r-16, 16, 1); - break; - default: - #if 0 - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from modem ", 16, 1, DUMP_PREFIX_ADDRESS, buf, r, 1); - #endif - break; - } - } - -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif - -#ifdef SDQXDM_DEBUG - if (full) { - pr_err("[diag-dbg] buffer become available %d %d, read %d\n", - driver->in_busy_1, driver->in_busy_2, r); - full = 0; - } - do_gettimeofday(&t1); - diff = (t1.tv_sec-t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000; - if (diff > 1000) { - pr_err("[diag-dbg] Over time (%ld) %ld.%04ld -> %ld.%04ld empty = %d\n", - diff, (long)t0.tv_sec, t0.tv_usec/1000, - (long)t1.tv_sec, t1.tv_usec/1000, empty); - } - write_ptr_modem->second = t1.tv_sec; - t0 = t1; - empty = 0; -#endif write_ptr_modem->length = r; *in_busy_ptr = 1; diag_device_write(buf, MODEM_DATA, write_ptr_modem); } } -#ifdef SDQXDM_DEBUG - else - empty++; -#endif - } else { -#ifdef SDQXDM_DEBUG - if (!full && driver->ch) - pr_info("[diag-dbg] Buffer full, %d bytes pending.\n", smd_read_avail(driver->ch)); - full = 1; -#endif } } @@ -214,35 +211,18 @@ int diag_device_write(void *buf, int proc_num, struct diag_request 
*write_ptr) #ifdef DIAG_DEBUG pr_debug("diag: ENQUEUE buf ptr" " and length is %x , %d\n", - (unsigned int)(driver->buf_tbl[i].buf), driver->buf_tbl[i].length); + (unsigned int)(driver->buf_ + tbl[i].buf), driver->buf_tbl[i].length); #endif break; } } -#ifdef CONFIG_DIAG_SDIO_PIPE - if (proc_num == SDIO_DATA) { - - for (i = 0; i < driver->num_mdmclients; i++) - if (driver->mdmclient_map[i].pid == - driver->logging_process_id) - break; - - if (i < driver->num_mdmclients) { - driver->mdmdata_ready[i] |= USERMODE_DIAGFWD; - wake_up_interruptible(&driver->mdmwait_q); - - return err; - } else - return -EINVAL; - } -#endif for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == driver->logging_process_id) break; if (i < driver->num_clients) { - wake_lock_timeout(&driver->wake_lock, HZ / 2); - driver->data_ready[i] |= USERMODE_DIAGFWD; + driver->data_ready[i] |= USER_SPACE_LOG_TYPE; wake_up_interruptible(&driver->wait_q); } else return -EINVAL; @@ -262,6 +242,13 @@ int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr) queue_work(driver->diag_wq, &(driver-> diag_read_smd_wcnss_work)); } +#ifdef CONFIG_DIAG_SDIO_PIPE + else if (proc_num == SDIO_DATA) { + driver->in_busy_sdio = 0; + queue_work(driver->diag_sdio_wq, + &(driver->diag_read_sdio_work)); + } +#endif err = -1; } #ifdef CONFIG_DIAG_OVER_USB @@ -296,11 +283,23 @@ int diag_device_write(void *buf, int proc_num, struct diag_request *write_ptr) } #ifdef CONFIG_DIAG_SDIO_PIPE else if (proc_num == SDIO_DATA) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) { + write_ptr->buf = buf; + err = usb_diag_write(driver->mdm_ch, write_ptr); + } else + pr_err("diag: Incorrect sdio data " + "while USB write\n"); + } +#endif +#ifdef CONFIG_DIAG_HSIC_PIPE + else if (proc_num == HSIC_DATA) { + if (driver->hsic_device_enabled) { write_ptr->buf = buf; err = usb_diag_write(driver->mdm_ch, write_ptr); } else - pr_err("diag: Incorrect data while USB write"); + pr_err("diag: Incorrect hsic data " + "while USB write\n"); } #endif APPEND_DEBUG('d'); @@ -314,9 +313,6 @@ void __diag_smd_wcnss_send_req(void) void *buf = driver->buf_in_wcnss; int *in_busy_wcnss_ptr = &(driver->in_busy_wcnss); struct diag_request *write_ptr_wcnss = driver->write_ptr_wcnss; -#if DIAG_XPST - int type; -#endif if ((!driver->in_busy_wcnss) && driver->ch_wcnss && buf) { int r = smd_read_avail(driver->ch_wcnss); @@ -336,13 +332,6 @@ void __diag_smd_wcnss_send_req(void) APPEND_DEBUG('i'); smd_read(driver->ch_wcnss, buf, r); APPEND_DEBUG('j'); -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif write_ptr_wcnss->length = r; *in_busy_wcnss_ptr = 1; diag_device_write(buf, WCNSS_DATA, @@ -357,9 +346,7 @@ void __diag_smd_qdsp_send_req(void) void *buf = NULL; int *in_busy_qdsp_ptr = NULL; struct diag_request *write_ptr_qdsp = NULL; -#if DIAG_XPST - int type; -#endif + if (!driver->in_busy_qdsp_1) { buf = driver->buf_in_qdsp_1; write_ptr_qdsp = driver->write_ptr_qdsp_1; @@ -391,13 +378,6 @@ void __diag_smd_qdsp_send_req(void) APPEND_DEBUG('i'); smd_read(driver->chqdsp, buf, r); APPEND_DEBUG('j'); -#if DIAG_XPST - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 0); - return; - } -#endif write_ptr_qdsp->length = r; *in_busy_qdsp_ptr = 1; diag_device_write(buf, QDSP_DATA, @@ -415,7 +395,7 @@ static void diag_print_mask_table(void) int last; uint8_t *ptr = driver->msg_masks; int i = 0; - + pr_info("diag: F3 
message mask table\n"); while (*(uint32_t *)(ptr + 4)) { first = *(uint32_t *)ptr; ptr += 4; @@ -424,12 +404,63 @@ static void diag_print_mask_table(void) printk(KERN_INFO "SSID %d - %d\n", first, last); for (i = 0 ; i <= last - first ; i++) printk(KERN_INFO "MASK:%x\n", *((uint32_t *)ptr + i)); - ptr += ((last - first) + 1)*4; + ptr += MAX_SSID_PER_RANGE*4; } #endif } +void diag_create_msg_mask_table(void) +{ + uint8_t *msg_mask_tbl_ptr = driver->msg_masks; + + CREATE_MSG_MASK_TBL_ROW(0); + CREATE_MSG_MASK_TBL_ROW(1); + CREATE_MSG_MASK_TBL_ROW(2); + CREATE_MSG_MASK_TBL_ROW(3); + CREATE_MSG_MASK_TBL_ROW(4); + CREATE_MSG_MASK_TBL_ROW(5); + CREATE_MSG_MASK_TBL_ROW(6); + CREATE_MSG_MASK_TBL_ROW(7); + CREATE_MSG_MASK_TBL_ROW(8); + CREATE_MSG_MASK_TBL_ROW(9); + CREATE_MSG_MASK_TBL_ROW(10); + CREATE_MSG_MASK_TBL_ROW(11); + CREATE_MSG_MASK_TBL_ROW(12); + CREATE_MSG_MASK_TBL_ROW(13); + CREATE_MSG_MASK_TBL_ROW(14); + CREATE_MSG_MASK_TBL_ROW(15); + CREATE_MSG_MASK_TBL_ROW(16); + CREATE_MSG_MASK_TBL_ROW(17); + CREATE_MSG_MASK_TBL_ROW(18); + CREATE_MSG_MASK_TBL_ROW(19); + CREATE_MSG_MASK_TBL_ROW(20); + CREATE_MSG_MASK_TBL_ROW(21); + CREATE_MSG_MASK_TBL_ROW(22); +} + +static void diag_set_msg_mask(int rt_mask) +{ + int first_ssid, last_ssid, i; + uint8_t *parse_ptr, *ptr = driver->msg_masks; + + mutex_lock(&driver->diagchar_mutex); + while (*(uint32_t *)(ptr + 4)) { + first_ssid = *(uint32_t *)ptr; + ptr += 4; + last_ssid = *(uint32_t *)ptr; + ptr += 4; + parse_ptr = ptr; + pr_debug("diag: updating range %d %d\n", first_ssid, last_ssid); + for (i = 0; i < last_ssid - first_ssid + 1; i++) { + *(int *)parse_ptr = rt_mask; + parse_ptr += 4; + } + ptr += MAX_SSID_PER_RANGE * 4; + } + mutex_unlock(&driver->diagchar_mutex); +} + static void diag_update_msg_mask(int start, int end , uint8_t *buf) { int found = 0; @@ -440,8 +471,8 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) uint8_t *ptr_buffer_end = &(*(driver->msg_masks)) + MSG_MASK_SIZE; mutex_lock(&driver->diagchar_mutex); - /* First SSID can be zero : So check that last is non-zero */ + /* First SSID can be zero : So check that last is non-zero */ while (*(uint32_t *)(ptr + 4)) { first = *(uint32_t *)ptr; ptr += 4; @@ -452,9 +483,11 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) if (end <= last) if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end, - (((end - start)+1)*4))) + (((end - start)+1)*4))) { + pr_debug("diag: update ssid start %d," + " end %d\n", start, end); memcpy(ptr, buf , ((end - start)+1)*4); - else + } else printk(KERN_CRIT "Not enough" " buffer space for" " MSG_MASK\n"); @@ -465,7 +498,7 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) found = 1; break; } else { - ptr += ((last - first) + 1)*4; + ptr += MAX_SSID_PER_RANGE*4; } } /* Entry was not found - add new table */ @@ -476,6 +509,8 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) ptr += 4; memcpy(ptr, &(end), 4); ptr += 4; + pr_debug("diag: adding NEW ssid start %d, end %d\n", + start, end); memcpy(ptr, buf , ((end - start) + 1)*4); } else printk(KERN_CRIT " Not enough buffer" @@ -486,7 +521,19 @@ static void diag_update_msg_mask(int start, int end , uint8_t *buf) } -static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bits) +void diag_toggle_event_mask(int toggle) +{ + uint8_t *ptr = driver->event_masks; + + mutex_lock(&driver->diagchar_mutex); + if (toggle) + memset(ptr, 0xFF, EVENT_MASK_SIZE); + else + memset(ptr, 0, EVENT_MASK_SIZE); + 
mutex_unlock(&driver->diagchar_mutex); +} + +static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bytes) { uint8_t *ptr = driver->event_masks; uint8_t *temp = buf + 2; @@ -496,27 +543,41 @@ static void diag_update_event_mask(uint8_t *buf, int toggle, int num_bits) memset(ptr, 0 , EVENT_MASK_SIZE); else if (CHK_OVERFLOW(ptr, ptr, - ptr+EVENT_MASK_SIZE, - num_bits/8 + 1)) - memcpy(ptr, temp , num_bits/8 + 1); + ptr+EVENT_MASK_SIZE, num_bytes)) + memcpy(ptr, temp , num_bytes); else printk(KERN_CRIT "Not enough buffer space " "for EVENT_MASK\n"); mutex_unlock(&driver->diagchar_mutex); } +static void diag_disable_log_mask(void) +{ + int i = 0; + struct mask_info *parse_ptr = (struct mask_info *)(driver->log_masks); + + pr_debug("diag: disable log masks\n"); + mutex_lock(&driver->diagchar_mutex); + for (i = 0; i < MAX_EQUIP_ID; i++) { + pr_debug("diag: equip id %d\n", parse_ptr->equip_id); + if (!(parse_ptr->equip_id)) /* Reached a null entry */ + break; + memset(driver->log_masks + parse_ptr->index, 0, + (parse_ptr->num_items + 7)/8); + parse_ptr++; + } + mutex_unlock(&driver->diagchar_mutex); +} + static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) { uint8_t *temp = buf; - struct mask_info { - int equip_id; - int index; - }; int i = 0; unsigned char *ptr_data; - int offset = 8*MAX_EQUIP_ID; - struct mask_info *ptr = (struct mask_info *)driver->log_masks; + int offset = (sizeof(struct mask_info))*MAX_EQUIP_ID; + struct mask_info *ptr = (struct mask_info *)(driver->log_masks); + pr_debug("diag: received equip id = %d\n", equip_id); mutex_lock(&driver->diagchar_mutex); /* Check if we already know index of this equipment ID */ for (i = 0; i < MAX_EQUIP_ID; i++) { @@ -525,8 +586,9 @@ static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) break; } if ((ptr->equip_id == 0) && (ptr->index == 0)) { - /*Reached a null entry */ + /* Reached a null entry */ ptr->equip_id = equip_id; + ptr->num_items = num_items; ptr->index = driver->log_masks_length; offset = driver->log_masks_length; driver->log_masks_length += ((num_items+7)/8); @@ -539,7 +601,7 @@ static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items) + LOG_MASK_SIZE, (num_items+7)/8)) memcpy(ptr_data, temp , (num_items+7)/8); else - printk(KERN_CRIT " Not enough buffer space for LOG_MASK\n"); + pr_err("diag: Not enough buffer space for LOG_MASK\n"); mutex_unlock(&driver->diagchar_mutex); } @@ -592,9 +654,10 @@ void diag_send_data(struct diag_master_table entry, unsigned char *buf, } else { if (len > 0) { if (entry.client_id == MODEM_PROC && driver->ch) { - if (cpu_is_msm8960() && + if (chk_apps_master() && (int)(*(char *)buf) == MODE_CMD) - if ((int)(*(char *)(buf+1)) == RESET_ID) + if ((int)(*(char *)(buf+1)) == + RESET_ID) return; smd_write(driver->ch, buf, len); } else if (entry.client_id == QDSP_PROC && @@ -610,11 +673,186 @@ void diag_send_data(struct diag_master_table entry, unsigned char *buf, } } +void diag_modem_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->ch_cntl, ALL_SSID, + ALL_SSID, MODEM_PROC); + diag_send_log_mask_update(driver->ch_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->ch_cntl, diag_event_num_bytes); +} + +void diag_qdsp_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->chqdsp_cntl, ALL_SSID, + ALL_SSID, QDSP_PROC); + diag_send_log_mask_update(driver->chqdsp_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->chqdsp_cntl, diag_event_num_bytes); +} + +void 
diag_wcnss_mask_update_fn(struct work_struct *work) +{ + diag_send_msg_mask_update(driver->ch_wcnss_cntl, ALL_SSID, + ALL_SSID, WCNSS_PROC); + diag_send_log_mask_update(driver->ch_wcnss_cntl, ALL_EQUIP_ID); + diag_send_event_mask_update(driver->ch_wcnss_cntl, + diag_event_num_bytes); +} + +void diag_send_log_mask_update(smd_channel_t *ch, int equip_id) +{ + void *buf = driver->buf_log_mask_update; + int header_size = sizeof(struct diag_ctrl_log_mask); + struct mask_info *ptr = (struct mask_info *)driver->log_masks; + int i, size, wr_size = -ENOMEM, retry_count = 0, timer; + + mutex_lock(&driver->diag_cntl_mutex); + for (i = 0; i < MAX_EQUIP_ID; i++) { + size = (ptr->num_items+7)/8; + /* reached null entry */ + if ((ptr->equip_id == 0) && (ptr->index == 0)) + break; + driver->log_mask->cmd_type = DIAG_CTRL_MSG_LOG_MASK; + driver->log_mask->num_items = ptr->num_items; + driver->log_mask->data_len = 11 + size; + driver->log_mask->stream_id = 1; /* 2, if dual stream */ + driver->log_mask->status = 3; /* status for valid mask */ + driver->log_mask->equip_id = ptr->equip_id; + driver->log_mask->log_mask_size = size; + /* send only desired update, NOT ALL */ + if (equip_id == ALL_EQUIP_ID || equip_id == + driver->log_mask->equip_id) { + memcpy(buf, driver->log_mask, header_size); + memcpy(buf+header_size, driver->log_masks+ptr->index, + size); + if (ch) { + while (retry_count < 3) { + wr_size = smd_write(ch, buf, + header_size + size); + if (wr_size == -ENOMEM) { + retry_count++; + for (timer = 0; timer < 5; + timer++) + udelay(2000); + } else + break; + } + if (wr_size != header_size + size) + pr_err("diag: log mask update failed" + " %d, tried %d", wr_size, header_size + size); + else + pr_debug("diag: updated log equip ID %d" + ",len %d\n", driver->log_mask->equip_id, + driver->log_mask->log_mask_size); + } else + pr_err("diag: ch not valid for log update\n"); + } + ptr++; + } + mutex_unlock(&driver->diag_cntl_mutex); +} + +void diag_send_event_mask_update(smd_channel_t *ch, int num_bytes) +{ + void *buf = driver->buf_event_mask_update; + int header_size = sizeof(struct diag_ctrl_event_mask); + int wr_size = -ENOMEM, retry_count = 0, timer; + + mutex_lock(&driver->diag_cntl_mutex); + if (num_bytes == 0) { + pr_debug("diag: event mask not set yet, so no update\n"); + mutex_unlock(&driver->diag_cntl_mutex); + return; + } + /* send event mask update */ + driver->event_mask->cmd_type = DIAG_CTRL_MSG_EVENT_MASK; + driver->event_mask->data_len = 7 + num_bytes; + driver->event_mask->stream_id = 1; /* 2, if dual stream */ + driver->event_mask->status = 3; /* status for valid mask */ + driver->event_mask->event_config = diag_event_config; /* event config */ + driver->event_mask->event_mask_size = num_bytes; + memcpy(buf, driver->event_mask, header_size); + memcpy(buf+header_size, driver->event_masks, num_bytes); + if (ch) { + while (retry_count < 3) { + wr_size = smd_write(ch, buf, header_size + num_bytes); + if (wr_size == -ENOMEM) { + retry_count++; + for (timer = 0; timer < 5; timer++) + udelay(2000); + } else + break; + } + if (wr_size != header_size + num_bytes) + pr_err("diag: error writing event mask %d, tried %d\n", + wr_size, header_size + num_bytes); + } else + pr_err("diag: ch not valid for event update\n"); + mutex_unlock(&driver->diag_cntl_mutex); +} + +void diag_send_msg_mask_update(smd_channel_t *ch, int updated_ssid_first, + int updated_ssid_last, int proc) +{ + void *buf = driver->buf_msg_mask_update; + int first, last, size = -ENOMEM, retry_count = 0, timer; + int header_size = 
+		sizeof(struct diag_ctrl_msg_mask);
+	uint8_t *ptr = driver->msg_masks;
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	while (*(uint32_t *)(ptr + 4)) {
+		first = *(uint32_t *)ptr;
+		ptr += 4;
+		last = *(uint32_t *)ptr;
+		ptr += 4;
+		if ((updated_ssid_first >= first && updated_ssid_last <= last)
+				|| (updated_ssid_first == ALL_SSID)) {
+			/* send f3 mask update */
+			driver->msg_mask->cmd_type = DIAG_CTRL_MSG_F3_MASK;
+			driver->msg_mask->msg_mask_size = last - first + 1;
+			driver->msg_mask->data_len = 11 +
+				4 * (driver->msg_mask->msg_mask_size);
+			driver->msg_mask->stream_id = 1; /* 2, if dual stream */
+			driver->msg_mask->status = 3; /* status valid mask */
+			driver->msg_mask->msg_mode = 0; /* Legacy mode */
+			driver->msg_mask->ssid_first = first;
+			driver->msg_mask->ssid_last = last;
+			memcpy(buf, driver->msg_mask, header_size);
+			memcpy(buf+header_size, ptr,
+				4 * (driver->msg_mask->msg_mask_size));
+			if (ch) {
+				while (retry_count < 3) {
+					size = smd_write(ch, buf, header_size +
+					   4*(driver->msg_mask->msg_mask_size));
+					if (size == -ENOMEM) {
+						retry_count++;
+						for (timer = 0; timer < 5;
+								timer++)
+							udelay(2000);
+					} else
+						break;
+				}
+				if (size != header_size +
+				    4*(driver->msg_mask->msg_mask_size))
+					pr_err("diag: proc %d, msg mask update "
+					  "fail %d, tried %d\n", proc, size,
+			header_size + 4*(driver->msg_mask->msg_mask_size));
+				else
+					pr_debug("diag: sending mask update for "
+			"ssid first %d, last %d on PROC %d\n", first, last, proc);
+			} else
+				pr_err("diag: proc %d, ch invalid msg mask "
+					"update\n", proc);
+		}
+		ptr += MAX_SSID_PER_RANGE*4;
+	}
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
 static int diag_process_apps_pkt(unsigned char *buf, int len)
 {
 	uint16_t subsys_cmd_code;
 	int subsys_id, ssid_first, ssid_last, ssid_range;
-	int packet_type = 1, i, cmd_code;
+	int packet_type = 1, i, cmd_code, rt_mask;
 	unsigned char *temp = buf;
 	int data_type;
 #if defined(CONFIG_DIAG_OVER_USB)
@@ -622,6 +860,167 @@ static int diag_process_apps_pkt(unsigned char *buf, int len)
 	unsigned char *ptr;
 #endif
+	/* Set log masks */
+	if (*buf == 0x73 && *(int *)(buf+4) == 3) {
+		buf += 8;
+		/* Read Equip ID and pass as first param below*/
+		diag_update_log_mask(*(int *)buf, buf+8, *(int *)(buf+4));
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+#if defined(CONFIG_DIAG_OVER_USB)
+		if (chk_apps_only()) {
+			driver->apps_rsp_buf[0] = 0x73;
+			*(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op.
ID */ + *(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */ + payload_length = 8 + ((*(int *)(buf + 4)) + 7)/8; + for (i = 0; i < payload_length; i++) + *(int *)(driver->apps_rsp_buf+12+i) = *(buf+i); + if (driver->ch_cntl) + diag_send_log_mask_update(driver->ch_cntl, + *(int *)buf); + if (driver->chqdsp_cntl) + diag_send_log_mask_update(driver->chqdsp_cntl, + *(int *)buf); + if (driver->ch_wcnss_cntl) + diag_send_log_mask_update(driver->ch_wcnss_cntl, + *(int *)buf); + ENCODE_RSP_AND_SEND(12 + payload_length - 1); + return 0; + } else + buf = temp; +#endif + } /* Disable log masks */ + else if (*buf == 0x73 && *(int *)(buf+4) == 0) { + buf += 8; + /* Disable mask for each log code */ + diag_disable_log_mask(); + diag_update_userspace_clients(LOG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x73; + driver->apps_rsp_buf[1] = 0x0; + driver->apps_rsp_buf[2] = 0x0; + driver->apps_rsp_buf[3] = 0x0; + *(int *)(driver->apps_rsp_buf + 4) = 0x0; + if (driver->ch_cntl) + diag_send_log_mask_update(driver->ch_cntl, + ALL_EQUIP_ID); + if (driver->chqdsp_cntl) + diag_send_log_mask_update(driver->chqdsp_cntl, + ALL_EQUIP_ID); + if (driver->ch_wcnss_cntl) + diag_send_log_mask_update(driver->ch_wcnss_cntl, + ALL_EQUIP_ID); + ENCODE_RSP_AND_SEND(7); + return 0; + } else + buf = temp; +#endif + } /* Set runtime message mask */ + else if ((*buf == 0x7d) && (*(buf+1) == 0x4)) { + ssid_first = *(uint16_t *)(buf + 2); + ssid_last = *(uint16_t *)(buf + 4); + ssid_range = 4 * (ssid_last - ssid_first + 1); + pr_debug("diag: received mask update for ssid_first = %d," + " ssid_last = %d", ssid_first, ssid_last); + diag_update_msg_mask(ssid_first, ssid_last , buf + 8); + diag_update_userspace_clients(MSG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + for (i = 0; i < 8 + ssid_range; i++) + *(driver->apps_rsp_buf + i) = *(buf+i); + *(driver->apps_rsp_buf + 6) = 0x1; + if (driver->ch_cntl) + diag_send_msg_mask_update(driver->ch_cntl, + ssid_first, ssid_last, MODEM_PROC); + if (driver->chqdsp_cntl) + diag_send_msg_mask_update(driver->chqdsp_cntl, + ssid_first, ssid_last, QDSP_PROC); + if (driver->ch_wcnss_cntl) + diag_send_msg_mask_update(driver->ch_wcnss_cntl, + ssid_first, ssid_last, WCNSS_PROC); + ENCODE_RSP_AND_SEND(8 + ssid_range - 1); + return 0; + } else + buf = temp; +#endif + } /* Set ALL runtime message mask */ + else if ((*buf == 0x7d) && (*(buf+1) == 0x5)) { + rt_mask = *(int *)(buf + 4); + diag_set_msg_mask(rt_mask); + diag_update_userspace_clients(MSG_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x7d; /* cmd_code */ + driver->apps_rsp_buf[1] = 0x5; /* set subcommand */ + driver->apps_rsp_buf[2] = 1; /* success */ + driver->apps_rsp_buf[3] = 0; /* rsvd */ + *(int *)(driver->apps_rsp_buf + 4) = rt_mask; + /* send msg mask update to peripheral */ + if (driver->ch_cntl) + diag_send_msg_mask_update(driver->ch_cntl, + ALL_SSID, ALL_SSID, MODEM_PROC); + if (driver->chqdsp_cntl) + diag_send_msg_mask_update(driver->chqdsp_cntl, + ALL_SSID, ALL_SSID, QDSP_PROC); + if (driver->ch_wcnss_cntl) + diag_send_msg_mask_update(driver->ch_wcnss_cntl, + ALL_SSID, ALL_SSID, WCNSS_PROC); + ENCODE_RSP_AND_SEND(7); + return 0; + } else + buf = temp; +#endif + } else if (*buf == 0x82) { /* event mask change */ + buf += 4; + diag_event_num_bytes = (*(uint16_t *)buf)/8+1; + diag_update_event_mask(buf, 1, (*(uint16_t *)buf)/8+1); + diag_update_userspace_clients(EVENT_MASKS_TYPE); +#if 
defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x82; + driver->apps_rsp_buf[1] = 0x0; + *(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0; + *(uint16_t *)(driver->apps_rsp_buf + 4) = + EVENT_LAST_ID + 1; + memcpy(driver->apps_rsp_buf+6, driver->event_masks, + EVENT_LAST_ID/8+1); + if (driver->ch_cntl) + diag_send_event_mask_update(driver->ch_cntl, + diag_event_num_bytes); + if (driver->chqdsp_cntl) + diag_send_event_mask_update(driver->chqdsp_cntl, + diag_event_num_bytes); + if (driver->ch_wcnss_cntl) + diag_send_event_mask_update( + driver->ch_wcnss_cntl, diag_event_num_bytes); + ENCODE_RSP_AND_SEND(6 + EVENT_LAST_ID/8); + return 0; + } else + buf = temp; +#endif + } else if (*buf == 0x60) { + diag_event_config = *(buf+1); + diag_toggle_event_mask(*(buf+1)); + diag_update_userspace_clients(EVENT_MASKS_TYPE); +#if defined(CONFIG_DIAG_OVER_USB) + if (chk_apps_only()) { + driver->apps_rsp_buf[0] = 0x60; + driver->apps_rsp_buf[1] = 0x0; + driver->apps_rsp_buf[2] = 0x0; + if (driver->ch_cntl) + diag_send_event_mask_update(driver->ch_cntl, + diag_event_num_bytes); + if (driver->chqdsp_cntl) + diag_send_event_mask_update(driver->chqdsp_cntl, + diag_event_num_bytes); + if (driver->ch_wcnss_cntl) + diag_send_event_mask_update( + driver->ch_wcnss_cntl, diag_event_num_bytes); + ENCODE_RSP_AND_SEND(2); + return 0; + } +#endif + } /* Check for registered clients and forward packet to apropriate proc */ cmd_code = (int)(*(char *)buf); temp++; @@ -631,13 +1030,13 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) temp += 2; data_type = APPS_DATA; /* Dont send any command other than mode reset */ - if (cpu_is_msm8960() && cmd_code == MODE_CMD) { + if (chk_apps_master() && cmd_code == MODE_CMD) { if (subsys_id != RESET_ID) data_type = MODEM_DATA; } pr_debug("diag: %d %d %d", cmd_code, subsys_id, subsys_cmd_code); - for (i = 0; i < diag_max_registration; i++) { + for (i = 0; i < diag_max_reg; i++) { entry = driver->table[i]; if (entry.process_id != NO_PROCESS) { if (entry.cmd_code == cmd_code && entry.subsys_id == @@ -671,71 +1070,9 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) } } } - /* set event mask */ - if (*buf == 0x82) { - buf += 4; - diag_update_event_mask(buf, 1, *(uint16_t *)buf); - diag_update_userspace_clients(EVENT_MASKS_TYPE); - } - /* event mask change */ - else if ((*buf == 0x60) && (*(buf+1) == 0x0)) { - diag_update_event_mask(buf+1, 0, 0); - diag_update_userspace_clients(EVENT_MASKS_TYPE); -#if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 */ - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for apps only DIAG */ - driver->apps_rsp_buf[0] = 0x60; - driver->apps_rsp_buf[1] = 0x0; - driver->apps_rsp_buf[2] = 0x0; - ENCODE_RSP_AND_SEND(2); - return 0; - } -#endif - } - /* Set log masks */ - else if (*buf == 0x73 && *(int *)(buf+4) == 3) { - buf += 8; - /* Read Equip ID and pass as first param below*/ - diag_update_log_mask(*(int *)buf, buf+8, *(int *)(buf+4)); - diag_update_userspace_clients(LOG_MASKS_TYPE); #if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 */ - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for Apps only DIAG */ - driver->apps_rsp_buf[0] = 0x73; - *(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op. 
ID */ - *(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */ - payload_length = 8 + ((*(int *)(buf + 4)) + 7)/8; - for (i = 0; i < payload_length; i++) - *(int *)(driver->apps_rsp_buf+12+i) = - *(buf+8+i); - ENCODE_RSP_AND_SEND(12 + payload_length - 1); - return 0; - } -#endif - } - /* Check for set message mask */ - else if ((*buf == 0x7d) && (*(buf+1) == 0x4)) { - ssid_first = *(uint16_t *)(buf + 2); - ssid_last = *(uint16_t *)(buf + 4); - ssid_range = 4 * (ssid_last - ssid_first + 1); - diag_update_msg_mask(ssid_first, ssid_last , buf + 8); - diag_update_userspace_clients(MSG_MASKS_TYPE); -#if defined(CONFIG_DIAG_OVER_USB) - if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID)) { - /* echo response back for apps only DIAG */ - for (i = 0; i < 8 + ssid_range; i++) - *(driver->apps_rsp_buf + i) = *(buf+i); - ENCODE_RSP_AND_SEND(8 + ssid_range - 1); - return 0; - } -#endif - } -#if defined(CONFIG_DIAG_OVER_USB) - /* Check for Apps Only 8960 & get event mask request */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) - && *buf == 0x81) { + /* Check for Apps Only & get event mask request */ + if (!(driver->ch) && chk_apps_only() && *buf == 0x81) { driver->apps_rsp_buf[0] = 0x81; driver->apps_rsp_buf[1] = 0x0; *(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0; @@ -745,8 +1082,8 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) ENCODE_RSP_AND_SEND(6 + EVENT_LAST_ID/8); return 0; } - /* Get log ID range & Check for Apps Only 8960 */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + /* Get log ID range & Check for Apps Only */ + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x73) && *(int *)(buf+4) == 1) { driver->apps_rsp_buf[0] = 0x73; *(int *)(driver->apps_rsp_buf + 4) = 0x1; /* operation ID */ @@ -771,7 +1108,7 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) return 0; } /* Respond to Get SSID Range request message */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x7d) && (*(buf+1) == 0x1)) { driver->apps_rsp_buf[0] = 0x7d; driver->apps_rsp_buf[1] = 0x1; @@ -816,11 +1153,19 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) *(uint16_t *)(driver->apps_rsp_buf + 78) = MSG_SSID_17_LAST; *(uint16_t *)(driver->apps_rsp_buf + 80) = MSG_SSID_18; *(uint16_t *)(driver->apps_rsp_buf + 82) = MSG_SSID_18_LAST; - ENCODE_RSP_AND_SEND(83); + *(uint16_t *)(driver->apps_rsp_buf + 84) = MSG_SSID_19; + *(uint16_t *)(driver->apps_rsp_buf + 86) = MSG_SSID_19_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 88) = MSG_SSID_20; + *(uint16_t *)(driver->apps_rsp_buf + 90) = MSG_SSID_20_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 92) = MSG_SSID_21; + *(uint16_t *)(driver->apps_rsp_buf + 94) = MSG_SSID_21_LAST; + *(uint16_t *)(driver->apps_rsp_buf + 96) = MSG_SSID_22; + *(uint16_t *)(driver->apps_rsp_buf + 98) = MSG_SSID_22_LAST; + ENCODE_RSP_AND_SEND(99); return 0; } - /* Check for AO8960 Respond to Get Subsys Build mask */ - else if (!(driver->ch) && (chk_config_get_id() == AO8960_TOOLS_ID) + /* Check for Apps Only Respond to Get Subsys Build mask */ + else if (!(driver->ch) && chk_apps_only() && (*buf == 0x7d) && (*(buf+1) == 0x2)) { ssid_first = *(uint16_t *)(buf + 2); ssid_last = *(uint16_t *)(buf + 4); @@ -911,12 +1256,28 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_18[i/4]; break; + case MSG_SSID_19: + for (i = 0; i < ssid_range; i += 4) + *(int 
*)(ptr + i) = msg_bld_masks_19[i/4]; + break; + case MSG_SSID_20: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_20[i/4]; + break; + case MSG_SSID_21: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_21[i/4]; + break; + case MSG_SSID_22: + for (i = 0; i < ssid_range; i += 4) + *(int *)(ptr + i) = msg_bld_masks_22[i/4]; + break; } ENCODE_RSP_AND_SEND(8 + ssid_range - 1); return 0; } /* Check for download command */ - else if ((cpu_is_msm8x60() || cpu_is_msm8960()) && (*buf == 0x3A)) { + else if ((cpu_is_msm8x60() || chk_apps_master()) && (*buf == 0x3A)) { /* send response back */ driver->apps_rsp_buf[0] = *buf; ENCODE_RSP_AND_SEND(0); @@ -971,6 +1332,11 @@ static int diag_process_apps_pkt(unsigned char *buf, int len) void diag_send_error_rsp(int index) { int i; + + if (index > 490) { + pr_err("diag: error response too huge, aborting\n"); + return; + } driver->apps_rsp_buf[0] = 0x13; /* error code 13 */ for (i = 0; i < index; i++) driver->apps_rsp_buf[i+1] = *(driver->hdlc_buf+i); @@ -984,10 +1350,6 @@ void diag_process_hdlc(void *data, unsigned len) { struct diag_hdlc_decode_type hdlc; int ret, type = 0; -#ifdef DIAG_DEBUG - int i; -#endif - pr_debug("diag: HDLC decode fn, len of data %d\n", len); hdlc.dest_ptr = driver->hdlc_buf; hdlc.dest_size = USB_MAX_OUT_BUF; @@ -1011,13 +1373,13 @@ void diag_process_hdlc(void *data, unsigned len) driver->debug_flag = 0; } /* send error responses from APPS for Central Routing */ - if (type == 1 && chk_config_get_id() == AO8960_TOOLS_ID) { + if (type == 1 && chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); type = 0; } /* implies this packet is NOT meant for apps */ if (!(driver->ch) && type == 1) { - if (chk_config_get_id() == AO8960_TOOLS_ID) { + if (chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); } else { /* APQ 8060, Let Q6 respond */ if (driver->chqdsp) @@ -1036,11 +1398,7 @@ void diag_process_hdlc(void *data, unsigned len) /* ignore 2 bytes for CRC, one for 7E and send */ if ((driver->ch) && (ret) && (type) && (hdlc.dest_idx > 3)) { APPEND_DEBUG('g'); -#ifdef CONFIG_MODEM_DIAG_MASTER - smd_write(driver->ch, data, len); -#else smd_write(driver->ch, driver->hdlc_buf, hdlc.dest_idx - 3); -#endif APPEND_DEBUG('h'); #ifdef DIAG_DEBUG printk(KERN_INFO "writing data to SMD, pkt length %d\n", len); @@ -1076,17 +1434,14 @@ int diagfwd_connect(void) queue_work(driver->diag_wq, &(driver->diag_read_smd_work)); queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); - - if (chk_config_get_id() == AO8960_TOOLS_ID) { - /* Poll SMD CNTL channels to check for data */ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); - queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_cntl_work)); - queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_cntl_work)); - } + /* Poll SMD CNTL channels to check for data */ + diag_smd_cntl_notify(NULL, SMD_EVENT_DATA); + diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA); + diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA); /* Poll USB channel to check for data*/ queue_work(driver->diag_wq, &(driver->diag_read_work)); #ifdef CONFIG_DIAG_SDIO_PIPE - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { if (driver->mdm_ch && !IS_ERR(driver->mdm_ch)) diagfwd_connect_sdio(); else @@ -1101,8 +1456,7 @@ int diagfwd_disconnect(void) printk(KERN_DEBUG "diag: USB disconnected\n"); driver->usb_connected = 0; driver->debug_flag = 1; - if 
(driver->usb_connected) - usb_diag_free_req(driver->legacy_ch); + usb_diag_free_req(driver->legacy_ch); if (driver->logging_mode == USB_MODE) { driver->in_busy_1 = 1; driver->in_busy_2 = 1; @@ -1111,7 +1465,7 @@ int diagfwd_disconnect(void) driver->in_busy_wcnss = 1; } #ifdef CONFIG_DIAG_SDIO_PIPE - if (diag_support_mdm9k) + if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) if (driver->mdm_ch && !IS_ERR(driver->mdm_ch)) diagfwd_disconnect_sdio(); #endif @@ -1140,26 +1494,19 @@ int diagfwd_write_complete(struct diag_request *diag_write_ptr) driver->in_busy_qdsp_2 = 0; APPEND_DEBUG('P'); queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); - } else if (is_wcnss_used && buf == (void *)driver->buf_in_wcnss) { + } else if (buf == (void *)driver->buf_in_wcnss) { driver->in_busy_wcnss = 0; APPEND_DEBUG('R'); queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); -#if DIAG_XPST - } else if (driver->in_busy_dmrounter == 1) { - driver->in_busy_dmrounter = 0; -#endif } #ifdef CONFIG_DIAG_SDIO_PIPE - else if (buf == (void *)driver->buf_in_sdio_1) { - driver->in_busy_sdio_1 = 0; - APPEND_DEBUG('q'); - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - } else if (buf == (void *)driver->buf_in_sdio_2) { - driver->in_busy_sdio_2 = 0; - APPEND_DEBUG('Q'); - queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); - } + else if (buf == (void *)driver->buf_in_sdio) + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) + diagfwd_write_complete_sdio(); + else + pr_err("diag: Incorrect buffer pointer while WRITE"); #endif else { diagmem_free(driver, (unsigned char *)buf, POOL_TYPE_HDLC); @@ -1186,14 +1533,6 @@ int diagfwd_read_complete(struct diag_request *diag_read_ptr) DUMP_PREFIX_ADDRESS, diag_read_ptr->buf, diag_read_ptr->actual, 1); #endif /* DIAG DEBUG */ -#if DIAG_XPST - if (driver->nohdlc) { - driver->usb_read_ptr->buf = driver->usb_buf_out; - driver->usb_read_ptr->length = USB_MAX_OUT_BUF; - usb_diag_read(driver->legacy_ch, driver->usb_read_ptr); - return 0; - } -#endif if (driver->logging_mode == USB_MODE) { if (status != -ECONNRESET && status != -ESHUTDOWN) queue_work(driver->diag_wq, @@ -1205,7 +1544,8 @@ int diagfwd_read_complete(struct diag_request *diag_read_ptr) } #ifdef CONFIG_DIAG_SDIO_PIPE else if (buf == (void *)driver->usb_buf_mdm_out) { - if (diag_support_mdm9k) { + if (machine_is_msm8x60_fusion() || + machine_is_msm8x60_fusn_ffa()) { driver->read_len_mdm = diag_read_ptr->actual; diagfwd_read_complete_sdio(); } else @@ -1267,11 +1607,7 @@ static void diag_smd_notify(void *ctxt, unsigned event) driver->ch = 0; return; } else if (event == SMD_EVENT_OPEN) { - if (ch_temp) - driver->ch = ch_temp; - else - DIAGFWD_INFO("%s: smd_open(%s):, ch_temp:%p, driver->ch:%p, &driver->ch:%p\n", - __func__, SMDDIAG_NAME, ch_temp, driver->ch, &driver->ch); + driver->ch = ch_temp; } queue_work(driver->diag_wq, &(driver->diag_read_smd_work)); } @@ -1279,74 +1615,54 @@ static void diag_smd_notify(void *ctxt, unsigned event) #if defined(CONFIG_MSM_N_WAY_SMD) static void diag_smd_qdsp_notify(void *ctxt, unsigned event) { + if (event == SMD_EVENT_CLOSE) { + pr_info("diag: clean lpass registration\n"); + diag_clear_reg(QDSP_PROC); + driver->chqdsp = 0; + return; + } else if (event == SMD_EVENT_OPEN) { + driver->chqdsp = chqdsp_temp; + } queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_work)); } #endif static void diag_smd_wcnss_notify(void *ctxt, unsigned event) { - queue_work(driver->diag_wq, 
&(driver->diag_read_smd_wcnss_work)); -} - -#if DIAG_XPST -void diag_smd_enable(smd_channel_t *ch, char *src, int mode) -{ - int r = 0; - static smd_channel_t *_ch; - DIAGFWD_INFO("smd_try_open(%s): mode=%d\n", src, mode); - - mutex_lock(&driver->smd_lock); - diag_smd_function_mode = mode; - if (mode) { - if (!driver->ch) { - r = smd_open(SMDDIAG_NAME, &driver->ch, driver, diag_smd_notify); - if (!r) - _ch = driver->ch; - } else - _ch = driver->ch; - } else { - if (driver->ch) { - r = smd_close(driver->ch); - driver->ch = NULL; - if (!r) - _ch = driver->ch; - } + if (event == SMD_EVENT_CLOSE) { + pr_info("diag: clean wcnss registration\n"); + diag_clear_reg(WCNSS_PROC); + driver->ch_wcnss = 0; + return; + } else if (event == SMD_EVENT_OPEN) { + driver->ch_wcnss = ch_wcnss_temp; } - ch = _ch; - mutex_unlock(&driver->smd_lock); - DIAGFWD_INFO("smd_try_open(%s): r=%d _ch=%x\n", src, r, (unsigned int)ch); + queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_work)); } -#endif static int diag_smd_probe(struct platform_device *pdev) { int r = 0; if (pdev->id == SMD_APPS_MODEM) { - r = smd_open(SMDDIAG_NAME, &driver->ch, driver, diag_smd_notify); - wmb(); + r = smd_open("DIAG", &driver->ch, driver, diag_smd_notify); ch_temp = driver->ch; - DIAGFWD_INFO("%s: smd_open(%s):%d, ch_temp:%p, driver->ch:%p, &driver->ch:%p\n", - __func__, SMDDIAG_NAME, r, ch_temp, driver->ch, &driver->ch); } #if defined(CONFIG_MSM_N_WAY_SMD) if (pdev->id == SMD_APPS_QDSP) { -#if defined(CONFIG_MACH_MECHA) || defined(CONFIG_ARCH_MSM8X60_LTE) \ - || defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) r = smd_named_open_on_edge("DIAG", SMD_APPS_QDSP , &driver->chqdsp, driver, diag_smd_qdsp_notify); -#else - r = smd_open("DSP_DIAG", &driver->chqdsp, driver, diag_smd_qdsp_notify); -#endif + chqdsp_temp = driver->chqdsp; } #endif - if (pdev->id == SMD_APPS_WCNSS) + if (pdev->id == SMD_APPS_WCNSS) { r = smd_named_open_on_edge("APPS_RIVA_DATA", SMD_APPS_WCNSS , &driver->ch_wcnss, driver, diag_smd_wcnss_notify); + ch_wcnss_temp = driver->ch_wcnss; + } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pr_debug("diag: open SMD port, Id = %d, r = %d\n", pdev->id, r); - smd_diag_initialized = 1; return 0; } @@ -1372,7 +1688,7 @@ static struct platform_driver msm_smd_ch1_driver = { .probe = diag_smd_probe, .driver = { - .name = SMDDIAG_NAME, + .name = "DIAG", .owner = THIS_MODULE, .pm = &diagfwd_dev_pm_ops, }, @@ -1392,16 +1708,26 @@ void diagfwd_init(void) { diag_debug_buf_idx = 0; driver->read_len_legacy = 0; + mutex_init(&driver->diag_cntl_mutex); - /* FIXME: there should be a better way to know if wcnss enabled */ - if (chk_config_get_id() == AO8960_TOOLS_ID) { - is_wcnss_used = 1; - DIAGFWD_INFO("wcnss channel was enabled in the platform\n"); - } else { - is_wcnss_used = 0; - DIAGFWD_INFO("wcnss channel was not enabled in the platform\n"); + if (driver->event_mask == NULL) { + driver->event_mask = kzalloc(sizeof( + struct diag_ctrl_event_mask), GFP_KERNEL); + if (driver->event_mask == NULL) + goto err; + } + if (driver->msg_mask == NULL) { + driver->msg_mask = kzalloc(sizeof( + struct diag_ctrl_msg_mask), GFP_KERNEL); + if (driver->msg_mask == NULL) + goto err; + } + if (driver->log_mask == NULL) { + driver->log_mask = kzalloc(sizeof( + struct diag_ctrl_log_mask), GFP_KERNEL); + if (driver->log_mask == NULL) + goto err; } - if (driver->buf_in_1 == NULL) { driver->buf_in_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_1 == NULL) @@ -1422,11 +1748,29 @@ void diagfwd_init(void) if 
(driver->buf_in_qdsp_2 == NULL) goto err; } - if (is_wcnss_used && driver->buf_in_wcnss == NULL) { + if (driver->buf_in_wcnss == NULL) { driver->buf_in_wcnss = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_wcnss == NULL) goto err; } + if (driver->buf_msg_mask_update == NULL) { + driver->buf_msg_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_msg_mask_update == NULL) + goto err; + } + if (driver->buf_log_mask_update == NULL) { + driver->buf_log_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_log_mask_update == NULL) + goto err; + } + if (driver->buf_event_mask_update == NULL) { + driver->buf_event_mask_update = kzalloc(APPS_BUF_SIZE, + GFP_KERNEL); + if (driver->buf_event_mask_update == NULL) + goto err; + } if (driver->usb_buf_out == NULL && (driver->usb_buf_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL)) == NULL) @@ -1442,10 +1786,12 @@ void diagfwd_init(void) && (driver->msg_masks = kzalloc(MSG_MASK_SIZE, GFP_KERNEL)) == NULL) goto err; + diag_create_msg_mask_table(); + diag_event_num_bytes = 0; if (driver->log_masks == NULL && (driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL)) == NULL) goto err; - driver->log_masks_length = 8*MAX_EQUIP_ID; + driver->log_masks_length = (sizeof(struct mask_info))*MAX_EQUIP_ID; if (driver->event_masks == NULL && (driver->event_masks = kzalloc(EVENT_MASK_SIZE, GFP_KERNEL)) == NULL) @@ -1455,13 +1801,6 @@ void diagfwd_init(void) ((driver->num_clients) * sizeof(struct diag_client_map), GFP_KERNEL)) == NULL) goto err; -#ifdef CONFIG_DIAG_SDIO_PIPE - if (driver->mdmclient_map == NULL && - (driver->mdmclient_map = kzalloc - ((driver->num_mdmclients) * sizeof(struct diag_client_map), - GFP_KERNEL)) == NULL) - goto err; -#endif if (driver->buf_tbl == NULL) driver->buf_tbl = kzalloc(buf_tbl_size * sizeof(struct diag_write_device), GFP_KERNEL); @@ -1471,14 +1810,8 @@ void diagfwd_init(void) (driver->data_ready = kzalloc(driver->num_clients * sizeof(int) , GFP_KERNEL)) == NULL) goto err; -#ifdef CONFIG_DIAG_SDIO_PIPE - if (driver->mdmdata_ready == NULL && - (driver->mdmdata_ready = kzalloc(driver->num_mdmclients * sizeof(struct - diag_client_map), GFP_KERNEL)) == NULL) - goto err; -#endif if (driver->table == NULL && - (driver->table = kzalloc(diag_max_registration* + (driver->table = kzalloc(diag_max_reg* sizeof(struct diag_master_table), GFP_KERNEL)) == NULL) goto err; @@ -1523,7 +1856,7 @@ void diagfwd_init(void) GFP_KERNEL)) == NULL) goto err; if (driver->apps_rsp_buf == NULL) { - driver->apps_rsp_buf = kzalloc(500, GFP_KERNEL); + driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL); if (driver->apps_rsp_buf == NULL) goto err; } @@ -1531,15 +1864,18 @@ void diagfwd_init(void) #ifdef CONFIG_DIAG_OVER_USB INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn); INIT_WORK(&(driver->diag_read_work), diag_read_work_fn); + INIT_WORK(&(driver->diag_modem_mask_update_work), + diag_modem_mask_update_fn); + INIT_WORK(&(driver->diag_qdsp_mask_update_work), + diag_qdsp_mask_update_fn); + INIT_WORK(&(driver->diag_wcnss_mask_update_work), + diag_wcnss_mask_update_fn); driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver, diag_usb_legacy_notifier); if (IS_ERR(driver->legacy_ch)) { printk(KERN_ERR "Unable to open USB diag legacy channel\n"); goto err; } -#endif -#if DIAG_XPST - mutex_init(&driver->smd_lock); #endif platform_driver_register(&msm_smd_ch1_driver); platform_driver_register(&diag_smd_lite_driver); @@ -1547,11 +1883,17 @@ void diagfwd_init(void) return; err: pr_err("diag: Could not initialize diag 
buffers"); + kfree(driver->event_mask); + kfree(driver->log_mask); + kfree(driver->msg_mask); kfree(driver->buf_in_1); kfree(driver->buf_in_2); kfree(driver->buf_in_qdsp_1); kfree(driver->buf_in_qdsp_2); kfree(driver->buf_in_wcnss); + kfree(driver->buf_msg_mask_update); + kfree(driver->buf_log_mask_update); + kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->msg_masks); @@ -1582,7 +1924,6 @@ void diagfwd_exit(void) driver->ch = 0; /* SMD can make this NULL */ driver->chqdsp = 0; driver->ch_wcnss = 0; - smd_diag_initialized = 0; #ifdef CONFIG_DIAG_OVER_USB if (driver->usb_connected) usb_diag_free_req(driver->legacy_ch); @@ -1590,11 +1931,17 @@ void diagfwd_exit(void) #endif platform_driver_unregister(&msm_smd_ch1_driver); platform_driver_unregister(&diag_smd_lite_driver); + kfree(driver->event_mask); + kfree(driver->log_mask); + kfree(driver->msg_mask); kfree(driver->buf_in_1); kfree(driver->buf_in_2); kfree(driver->buf_in_qdsp_1); kfree(driver->buf_in_qdsp_2); kfree(driver->buf_in_wcnss); + kfree(driver->buf_msg_mask_update); + kfree(driver->buf_log_mask_update); + kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->msg_masks); diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h index 0843bb399a3..574445915dc 100644 --- a/drivers/char/diag/diagfwd.h +++ b/drivers/char/diag/diagfwd.h @@ -1,5 +1,4 @@ - -/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,11 +16,6 @@ #define NO_PROCESS 0 #define NON_APPS_PROC -1 -#define DIAGLOG_MODE_NONE 0 -#define DIAGLOG_MODE_HEAD 1 -#define DIAGLOG_MODE_FULL 2 -#define DIAGLOG_MODE_PING 3 - void diagfwd_init(void); void diagfwd_exit(void); void diag_process_hdlc(void *data, unsigned len); @@ -32,23 +26,18 @@ void diag_usb_legacy_notifier(void *, unsigned, struct diag_request *); long diagchar_ioctl(struct file *, unsigned int, unsigned long); int diag_device_write(void *, int, struct diag_request *); int mask_request_validate(unsigned char mask_buf[]); -int chk_config_get_id(void); void diag_clear_reg(int); - +int chk_apps_only(void); +void diag_send_event_mask_update(smd_channel_t *, int num_bytes); +void diag_send_msg_mask_update(smd_channel_t *, int ssid_first, + int ssid_last, int proc); +void diag_send_log_mask_update(smd_channel_t *, int); /* State for diag forwarding */ #ifdef CONFIG_DIAG_OVER_USB int diagfwd_connect(void); int diagfwd_disconnect(void); #endif -extern int diag_support_mdm9k; extern int diag_debug_buf_idx; extern unsigned char diag_debug_buf[1024]; -extern unsigned diag7k_debug_mask; -extern unsigned diag9k_debug_mask; - -#define SMD_FUNC_CLOSE 0 -#define SMD_FUNC_OPEN_DIAG 1 -#define SMD_FUNC_OPEN_BT 2 -void diag_smd_enable(smd_channel_t *ch, char *src, int mode); - +extern int diag_event_num_bytes; #endif diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 13c3c478a93..fcf16d05838 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,12 +16,81 @@ #include "diagchar.h" #include "diagfwd.h" #include "diagfwd_cntl.h" -#ifdef CONFIG_DIAG_OVER_USB -#include -#endif #define HDR_SIZ 8 +void diag_smd_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->ch_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->ch_cntl); + r2 = smd_cur_packet_size(driver->ch_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_cntl_work)); + else + pr_debug("diag: incomplete pkt on Modem CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_modem_mask_update_work)); + break; + } +} + +void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->chqdsp_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->chqdsp_cntl); + r2 = smd_cur_packet_size(driver->chqdsp_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_qdsp_cntl_work)); + else + pr_debug("diag: incomplete pkt on LPASS CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_qdsp_mask_update_work)); + break; + } +} + +void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event) +{ + int r1, r2; + + if (!(driver->ch_wcnss_cntl)) + return; + + switch (event) { + case SMD_EVENT_DATA: + r1 = smd_read_avail(driver->ch_wcnss_cntl); + r2 = smd_cur_packet_size(driver->ch_wcnss_cntl); + if (r1 > 0 && r1 == r2) + queue_work(driver->diag_wq, + &(driver->diag_read_smd_wcnss_cntl_work)); + else + pr_debug("diag: incomplete pkt on WCNSS CNTL ch\n"); + break; + case SMD_EVENT_OPEN: + queue_work(driver->diag_cntl_wq, + &(driver->diag_wcnss_mask_update_work)); + break; + } +} + static void diag_smd_cntl_send_req(int proc_num) { int data_len = 0, type = -1, count_bytes = 0, j, r, flag = 0; @@ -30,12 +99,13 @@ static void diag_smd_cntl_send_req(int proc_num) struct diag_ctrl_msg *msg; struct cmd_code_range *range; struct bindpkt_params *temp; - void *buf = NULL, *dump_buf = NULL; + void *buf = NULL; smd_channel_t *smd_ch = NULL; - DIAG_INFO("%s: %s\n", __func__, - (proc_num == MODEM_PROC)?"MODEM_PROC": - (proc_num == QDSP_PROC)?"QDSP_PROC":"WCNSS_PROC"); + if (pkt_params == NULL) { + pr_alert("diag: Memory allocation failure\n"); + return; + } if (proc_num == MODEM_PROC) { buf = driver->buf_in_cntl; @@ -69,24 +139,29 @@ static void diag_smd_cntl_send_req(int proc_num) while (count_bytes + HDR_SIZ <= r) { type = *(uint32_t *)(buf); data_len = *(uint32_t *)(buf + 4); + if (type < DIAG_CTRL_MSG_REG || + type > DIAG_CTRL_MSG_F3_MASK_V2) { + pr_alert("diag: Invalid Msg type %d proc %d", + type, proc_num); + break; + } + if (data_len < 0 || data_len > r) { + pr_alert("diag: Invalid data len %d proc %d", + data_len, proc_num); + break; + } count_bytes = count_bytes+HDR_SIZ+data_len; if (type == DIAG_CTRL_MSG_REG && r >= count_bytes) { msg = buf+HDR_SIZ; - if (!msg->count_entries) { - DIAG_ERR("version: %d, cmd_code: %d," - " subsysid: %d, count_entries: %d," - " port:%d\n", msg->version, - msg->cmd_code, msg->subsysid, - msg->count_entries, msg->port); - dump_buf = kmalloc(r, GFP_KERNEL); - memcpy(dump_buf, buf, r); - continue; - } range = buf+HDR_SIZ+ sizeof(struct diag_ctrl_msg); pkt_params->count = msg->count_entries; temp = kzalloc(pkt_params->count * sizeof(struct bindpkt_params), 
GFP_KERNEL); + if (temp == NULL) { + pr_alert("diag: Memory alloc fail\n"); + return; + } for (j = 0; j < pkt_params->count; j++) { temp->cmd_code = msg->cmd_code; temp->subsys_id = msg->subsysid; @@ -103,23 +178,19 @@ static void diag_smd_cntl_send_req(int proc_num) diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG, (unsigned long)pkt_params); kfree(temp); - buf = buf + HDR_SIZ + data_len; } + buf = buf + HDR_SIZ + data_len; } } - if (dump_buf) { - print_hex_dump(KERN_DEBUG, "diag_debug_buf:", - 16, 1, DUMP_PREFIX_ADDRESS, dump_buf, r, 1); - kfree(dump_buf); - } kfree(pkt_params); if (flag) { /* Poll SMD CNTL channels to check for data */ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); - queue_work(driver->diag_wq, - &(driver->diag_read_smd_qdsp_cntl_work)); - queue_work(driver->diag_wq, - &(driver->diag_read_smd_wcnss_cntl_work)); + if (proc_num == MODEM_PROC) + diag_smd_cntl_notify(NULL, SMD_EVENT_DATA); + else if (proc_num == QDSP_PROC) + diag_smd_qdsp_cntl_notify(NULL, SMD_EVENT_DATA); + else if (proc_num == WCNSS_PROC) + diag_smd_wcnss_cntl_notify(NULL, SMD_EVENT_DATA); } } @@ -138,27 +209,12 @@ void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *work) diag_smd_cntl_send_req(WCNSS_PROC); } -static void diag_smd_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_cntl_work)); -} - -static void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_qdsp_cntl_work)); -} - -static void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event) -{ - queue_work(driver->diag_wq, &(driver->diag_read_smd_wcnss_cntl_work)); -} - static int diag_smd_cntl_probe(struct platform_device *pdev) { int r = 0; - /* open control ports only on 8960 */ - if (chk_config_get_id() == AO8960_TOOLS_ID) { + /* open control ports only on 8960 & newer targets */ + if (chk_apps_only()) { if (pdev->id == SMD_APPS_MODEM) r = smd_open("DIAG_CNTL", &driver->ch_cntl, driver, diag_smd_cntl_notify); @@ -214,6 +270,7 @@ static struct platform_driver diag_smd_lite_cntl_driver = { void diagfwd_cntl_init(void) { + driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq"); if (driver->buf_in_cntl == NULL) { driver->buf_in_cntl = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (driver->buf_in_cntl == NULL) @@ -238,6 +295,8 @@ void diagfwd_cntl_init(void) kfree(driver->buf_in_cntl); kfree(driver->buf_in_qdsp_cntl); kfree(driver->buf_in_wcnss_cntl); + if (driver->diag_cntl_wq) + destroy_workqueue(driver->diag_cntl_wq); } void diagfwd_cntl_exit(void) @@ -248,6 +307,7 @@ void diagfwd_cntl_exit(void) driver->ch_cntl = 0; driver->chqdsp_cntl = 0; driver->ch_wcnss_cntl = 0; + destroy_workqueue(driver->diag_cntl_wq); platform_driver_unregister(&msm_smd_ch1_cntl_driver); platform_driver_unregister(&diag_smd_lite_cntl_driver); diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h index 542138df1f6..ad1fec967b2 100644 --- a/drivers/char/diag/diagfwd_cntl.h +++ b/drivers/char/diag/diagfwd_cntl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,22 @@
 #ifndef DIAGFWD_CNTL_H
 #define DIAGFWD_CNTL_H
-#define DIAG_CTRL_MSG_REG	1 /* Message registration commands */
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG	1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR	2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE	3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA	4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE	8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK	9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2	10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2	11
 struct cmd_code_range {
 	uint16_t cmd_code_lo;
@@ -29,10 +44,46 @@ struct diag_ctrl_msg {
 	uint16_t port;
 };
+struct diag_ctrl_event_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t event_config;
+	uint32_t event_mask_size;
+	/* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t equip_id;
+	uint32_t num_items; /* Last log code for this equip_id */
+	uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+	/* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t msg_mode;
+	uint16_t ssid_first; /* Start of range of supported SSIDs */
+	uint16_t ssid_last; /* Last SSID in range */
+	uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+	/* Copy msg mask here */
+} __packed;
+
 void diagfwd_cntl_init(void);
 void diagfwd_cntl_exit(void);
 void diag_read_smd_cntl_work_fn(struct work_struct *);
 void diag_read_smd_qdsp_cntl_work_fn(struct work_struct *);
 void diag_read_smd_wcnss_cntl_work_fn(struct work_struct *);
+void diag_smd_cntl_notify(void *ctxt, unsigned event);
+void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event);
+void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event);
 #endif
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
new file mode 100644
index 00000000000..ac5722f3b8b
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -0,0 +1,530 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DIAG_OVER_USB +#include +#endif +#include "diagchar_hdlc.h" +#include "diagmem.h" +#include "diagchar.h" +#include "diagfwd.h" +#include "diagfwd_hsic.h" + +static void diag_read_hsic_work_fn(struct work_struct *work) +{ + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + /* + * If there is no hsic data being read from the hsic and there + * is no hsic data being written to the usb mdm channel + */ + if (!driver->in_busy_hsic_read && !driver->in_busy_hsic_write_on_mdm) { + /* + * Initiate the read from the hsic. The hsic read is + * asynchronous. Once the read is complete the read + * callback function will be called. + */ + int err; + driver->in_busy_hsic_read = 1; + APPEND_DEBUG('i'); + err = diag_bridge_read((char *)driver->buf_in_hsic, + IN_BUF_SIZE); + if (err) { + pr_err("DIAG: Error initiating HSIC read, err: %d\n", + err); + /* + * If the error is recoverable, then clear + * the read flag, so we will resubmit a + * read on the next frame. Otherwise, don't + * resubmit a read on the next frame. + */ + if ((-ESHUTDOWN) != err) + driver->in_busy_hsic_read = 0; + } + } + + /* + * If for some reason there was no hsic data, set up + * the next read + */ + if (!driver->in_busy_hsic_read) + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); +} + +static void diag_hsic_read_complete_callback(void *ctxt, char *buf, + int buf_size, int actual_size) +{ + /* The read of the data from the HSIC bridge is complete */ + driver->in_busy_hsic_read = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + APPEND_DEBUG('j'); + if (actual_size > 0) { + if (!buf) { + pr_err("Out of diagmem for HSIC\n"); + } else { + driver->write_ptr_mdm->length = actual_size; + /* + * Set flag to denote hsic data is currently + * being written to the usb mdm channel. 
+ * driver->buf_in_hsic was given to + * diag_bridge_read(), so buf here should be + * driver->buf_in_hsic + */ + driver->in_busy_hsic_write_on_mdm = 1; + diag_device_write((void *)buf, HSIC_DATA, + driver->write_ptr_mdm); + } + } else { + pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size); + } + + /* + * If for some reason there was no hsic data to write to the + * mdm channel, set up another read + */ + if (!driver->in_busy_hsic_write_on_mdm) + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); +} + +static void diag_hsic_write_complete_callback(void *ctxt, char *buf, + int buf_size, int actual_size) +{ + /* The write of the data to the HSIC bridge is complete */ + driver->in_busy_hsic_write = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + if (actual_size < 0) + pr_err("DIAG in %s: actual_size: %d\n", __func__, actual_size); + + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); +} + +static struct diag_bridge_ops hsic_diag_bridge_ops = { + .ctxt = NULL, + .read_complete_cb = diag_hsic_read_complete_callback, + .write_complete_cb = diag_hsic_write_complete_callback, +}; + +static int diag_hsic_close(void) +{ + if (driver->hsic_device_enabled) { + driver->hsic_ch = 0; + if (driver->hsic_device_opened) { + driver->hsic_device_opened = 0; + diag_bridge_close(); + } + pr_debug("DIAG in %s: closed successfully\n", __func__); + } else { + pr_debug("DIAG in %s: already closed\n", __func__); + } + + return 0; +} + +/* diagfwd_connect_hsic is called when the USB mdm channel is connected */ +static int diagfwd_connect_hsic(void) +{ + int err; + + pr_debug("DIAG in %s\n", __func__); + + err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE, N_MDM_READ); + if (err) + pr_err("DIAG: unable to alloc USB req on mdm ch err:%d\n", err); + + driver->usb_mdm_connected = 1; + driver->in_busy_hsic_write_on_mdm = 0; + driver->in_busy_hsic_read_on_mdm = 0; + driver->in_busy_hsic_write = 0; + driver->in_busy_hsic_read = 0; + + /* If the hsic (diag_bridge) platform device is not open */ + if (driver->hsic_device_enabled) { + if (!driver->hsic_device_opened) { + err = diag_bridge_open(&hsic_diag_bridge_ops); + if (err) { + pr_err("DIAG: HSIC channel open error: %d\n", + err); + } else { + pr_info("DIAG: opened HSIC channel\n"); + driver->hsic_device_opened = 1; + } + } else { + pr_info("DIAG: HSIC channel already open\n"); + } + + /* + * Turn on communication over usb mdm and hsic, if the hsic + * device driver is enabled and opened + */ + if (driver->hsic_device_opened) + driver->hsic_ch = 1; + + /* Poll USB mdm channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + /* Poll HSIC channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + } else { + /* The hsic device driver has not yet been enabled */ + pr_info("DIAG: HSIC channel not yet enabled\n"); + } + + return 0; +} + +/* + * diagfwd_disconnect_hsic is called when the USB mdm channel + * is disconnected + */ +static int diagfwd_disconnect_hsic(void) +{ + pr_debug("DIAG in %s\n", __func__); + + driver->usb_mdm_connected = 0; + usb_diag_free_req(driver->mdm_ch); + driver->in_busy_hsic_write_on_mdm = 1; + driver->in_busy_hsic_read_on_mdm = 1; + driver->in_busy_hsic_write = 1; + driver->in_busy_hsic_read = 1; + + /* Turn off communication over usb mdm and hsic */ + driver->hsic_ch = 0; + + return 0; +} + +/* + * diagfwd_write_complete_hsic is called after the asynchronous + * 
usb_diag_write() on mdm channel is complete + */ +static int diagfwd_write_complete_hsic(void) +{ + /* + * Clear flag to denote that the write of the hsic data on the + * usb mdm channel is complete + */ + driver->in_busy_hsic_write_on_mdm = 0; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return 0; + } + + APPEND_DEBUG('q'); + + /* Read data from the hsic */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + + return 0; +} + +/* Called after the asychronous usb_diag_read() on mdm channel is complete */ +static int diagfwd_read_complete_hsic(struct diag_request *diag_read_ptr) +{ + /* The read of the usb driver on the mdm (not hsic) has completed */ + driver->in_busy_hsic_read_on_mdm = 0; + driver->read_len_mdm = diag_read_ptr->actual; + + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return 0; + } + + /* + * The read of the usb driver on the mdm channel has completed. + * If there is no write on the hsic in progress, check if the + * read has data to pass on to the hsic. If so, pass the usb + * mdm data on to the hsic. + */ + if (!driver->in_busy_hsic_write && driver->usb_buf_mdm_out && + (driver->read_len_mdm > 0)) { + + /* + * Initiate the hsic write. The hsic write is + * asynchronous. When complete the write + * complete callback function will be called + */ + int err; + driver->in_busy_hsic_write = 1; + err = diag_bridge_write(driver->usb_buf_mdm_out, + driver->read_len_mdm); + if (err) { + pr_err("DIAG: mdm data on hsic write err: %d\n", err); + /* + * If the error is recoverable, then clear + * the write flag, so we will resubmit a + * write on the next frame. Otherwise, don't + * resubmit a write on the next frame. + */ + if ((-ESHUTDOWN) != err) + driver->in_busy_hsic_write = 0; + } + } + + /* + * If there is no write of the usb mdm data on the + * hsic channel + */ + if (!driver->in_busy_hsic_write) + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + return 0; +} + +static void diagfwd_hsic_notifier(void *priv, unsigned event, + struct diag_request *d_req) +{ + switch (event) { + case USB_DIAG_CONNECT: + diagfwd_connect_hsic(); + break; + case USB_DIAG_DISCONNECT: + diagfwd_disconnect_hsic(); + break; + case USB_DIAG_READ_DONE: + diagfwd_read_complete_hsic(d_req); + break; + case USB_DIAG_WRITE_DONE: + diagfwd_write_complete_hsic(); + break; + default: + pr_err("DIAG in %s: Unknown event from USB diag:%u\n", + __func__, event); + break; + } +} + +static void diag_read_mdm_work_fn(struct work_struct *work) +{ + if (!driver->hsic_ch) { + pr_err("DIAG in %s: driver->hsic_ch == 0\n", __func__); + return; + } + + /* + * If there is no data being read from the usb mdm channel + * and there is no mdm channel data currently being written + * to the hsic + */ + if (!driver->in_busy_hsic_read_on_mdm && !driver->in_busy_hsic_write) { + APPEND_DEBUG('x'); + + /* Setup the next read from usb mdm channel */ + driver->in_busy_hsic_read_on_mdm = 1; + driver->usb_read_mdm_ptr->buf = driver->usb_buf_mdm_out; + driver->usb_read_mdm_ptr->length = USB_MAX_OUT_BUF; + usb_diag_read(driver->mdm_ch, driver->usb_read_mdm_ptr); + APPEND_DEBUG('y'); + } + + /* + * If for some reason there was no mdm channel read initiated, + * queue up the reading of data from the mdm channel + */ + if (!driver->in_busy_hsic_read_on_mdm) + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); +} + +int diag_hsic_enable(void) +{ + pr_debug("DIAG in %s\n", __func__); + + driver->read_len_mdm = 0; + if 
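The HSIC forwarding code above is driven entirely by the in_busy_hsic_* flags: a new read is submitted only when neither a read nor the onward write of the previous buffer is in flight, and each completion callback kicks the opposite direction again. The userspace program below reproduces just that single-buffer handshake, with the asynchronous completions pretended to finish immediately; it is an illustration of the flow, not driver code.

/* Single-buffer forwarding handshake: read only when nothing is in flight,
 * and let each completion restart the other side. All sizes are invented. */
#include <stdbool.h>
#include <stdio.h>

static bool in_busy_read;    /* a read of the source is outstanding       */
static bool in_busy_write;   /* its buffer is still being written onward  */
static int  remaining = 48;  /* pretend 48 bytes are queued on the source */

static void start_read(void);

/* Completion callback for the (pretend-)asynchronous onward write. */
static void write_complete(void)
{
    in_busy_write = false;
    start_read();                 /* buffer is free again: fetch more data */
}

/* Completion callback for the (pretend-)asynchronous source read. */
static void read_complete(int actual_size)
{
    in_busy_read = false;
    if (actual_size > 0) {
        in_busy_write = true;     /* hand the single buffer to the writer */
        printf("forwarding %d bytes\n", actual_size);
        write_complete();         /* pretend the write finished instantly */
    }
    if (!in_busy_write)
        start_read();             /* nothing to forward: poll again */
}

static void start_read(void)
{
    int chunk;

    if (in_busy_read || in_busy_write || remaining == 0)
        return;                   /* a transfer is in flight, or no data */
    in_busy_read = true;
    chunk = remaining < 16 ? remaining : 16;
    remaining -= chunk;
    read_complete(chunk);         /* pretend the read finished instantly */
}

int main(void)
{
    start_read();
    return 0;
}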
(driver->buf_in_hsic == NULL) + driver->buf_in_hsic = kzalloc(IN_BUF_SIZE, GFP_KERNEL); + if (driver->buf_in_hsic == NULL) + goto err; + if (driver->usb_buf_mdm_out == NULL) + driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); + if (driver->usb_buf_mdm_out == NULL) + goto err; + if (driver->write_ptr_mdm == NULL) + driver->write_ptr_mdm = kzalloc( + sizeof(struct diag_request), GFP_KERNEL); + if (driver->write_ptr_mdm == NULL) + goto err; + if (driver->usb_read_mdm_ptr == NULL) + driver->usb_read_mdm_ptr = kzalloc( + sizeof(struct diag_request), GFP_KERNEL); + if (driver->usb_read_mdm_ptr == NULL) + goto err; + driver->diag_hsic_wq = create_singlethread_workqueue("diag_hsic_wq"); +#ifdef CONFIG_DIAG_OVER_USB + INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn); +#endif + INIT_WORK(&(driver->diag_read_hsic_work), diag_read_hsic_work_fn); + + driver->hsic_device_enabled = 1; + + return 0; +err: + pr_err("DIAG could not initialize buf for HSIC\n"); + kfree(driver->buf_in_hsic); + kfree(driver->usb_buf_mdm_out); + kfree(driver->write_ptr_mdm); + kfree(driver->usb_read_mdm_ptr); + if (driver->diag_hsic_wq) + destroy_workqueue(driver->diag_hsic_wq); + + return -ENOMEM; +} + +static int diag_hsic_probe(struct platform_device *pdev) +{ + int err; + + if (!driver->hsic_device_enabled) { + err = diag_hsic_enable(); + if (err) { + pr_err("DIAG could not enable HSIC, err: %d\n", err); + return err; + } + } + + /* The hsic (diag_bridge) platform device driver is enabled */ + err = diag_bridge_open(&hsic_diag_bridge_ops); + if (err) { + pr_err("DIAG could not open HSIC channel, err: %d\n", err); + driver->hsic_device_opened = 0; + return err; + } + + pr_info("DIAG opened HSIC channel\n"); + driver->hsic_device_opened = 1; + + /* + * The probe function was called after the usb was connected + * on the legacy channel. Communication over usb mdm and hsic + * needs to be turned on. 
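diag_hsic_enable() above allocates each buffer only if it is still NULL and, on any failure, simply frees everything and destroys the workqueue; because kfree(NULL) is a no-op, the error path needs no record of which allocation failed. A minimal userspace analogue of that idiom (free(NULL) is equally harmless) is sketched below with invented names and sizes.

/* Allocate-or-clean-up idiom: on any failure, free everything
 * unconditionally and reset the pointers. Sizes are illustrative. */
#include <stdlib.h>
#include <stdio.h>

#define IN_BUF_SIZE  16384
#define OUT_BUF_SIZE 4096

static char *buf_in;
static char *buf_out;

static int demo_enable(void)
{
    if (!buf_in)
        buf_in = calloc(1, IN_BUF_SIZE);
    if (!buf_in)
        goto err;
    if (!buf_out)
        buf_out = calloc(1, OUT_BUF_SIZE);
    if (!buf_out)
        goto err;
    return 0;
err:
    fprintf(stderr, "could not initialize buffers\n");
    free(buf_in);        /* free(NULL) is a no-op, like kfree(NULL) */
    free(buf_out);
    buf_in = buf_out = NULL;
    return -1;
}

int main(void)
{
    return demo_enable() ? 1 : 0;
}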
+ */ + if (driver->usb_connected) { + driver->hsic_ch = 1; + driver->in_busy_hsic_write_on_mdm = 0; + driver->in_busy_hsic_read_on_mdm = 0; + driver->in_busy_hsic_write = 0; + driver->in_busy_hsic_read = 0; + + /* Poll USB mdm channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_mdm_work); + + /* Poll HSIC channel to check for data */ + queue_work(driver->diag_hsic_wq, &driver->diag_read_hsic_work); + } + + return err; +} + +static int diag_hsic_remove(struct platform_device *pdev) +{ + pr_info("DIAG: %s called\n", __func__); + diag_hsic_close(); + return 0; +} + +static int diagfwd_hsic_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending...\n"); + return 0; +} + +static int diagfwd_hsic_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming...\n"); + return 0; +} + +static const struct dev_pm_ops diagfwd_hsic_dev_pm_ops = { + .runtime_suspend = diagfwd_hsic_runtime_suspend, + .runtime_resume = diagfwd_hsic_runtime_resume, +}; + +static struct platform_driver msm_hsic_ch_driver = { + .probe = diag_hsic_probe, + .remove = diag_hsic_remove, + .driver = { + .name = "diag_bridge", + .owner = THIS_MODULE, + .pm = &diagfwd_hsic_dev_pm_ops, + }, +}; + + +void __init diagfwd_hsic_init(void) +{ + int ret; + + pr_debug("DIAG in %s\n", __func__); + +#ifdef CONFIG_DIAG_OVER_USB + driver->mdm_ch = usb_diag_open(DIAG_MDM, driver, diagfwd_hsic_notifier); + if (IS_ERR(driver->mdm_ch)) { + pr_err("DIAG Unable to open USB diag MDM channel\n"); + goto err; + } +#endif + ret = platform_driver_register(&msm_hsic_ch_driver); + if (ret) + pr_err("DIAG could not register HSIC device, ret: %d\n", ret); + else + driver->hsic_initialized = 1; + + return; +err: + pr_err("DIAG could not initialize for HSIC execution\n"); +} + +void __exit diagfwd_hsic_exit(void) +{ + pr_debug("DIAG in %s\n", __func__); + + if (driver->hsic_initialized) + diag_hsic_close(); + +#ifdef CONFIG_DIAG_OVER_USB + if (driver->usb_mdm_connected) + usb_diag_free_req(driver->mdm_ch); +#endif + platform_driver_unregister(&msm_hsic_ch_driver); +#ifdef CONFIG_DIAG_OVER_USB + usb_diag_close(driver->mdm_ch); +#endif + kfree(driver->buf_in_hsic); + kfree(driver->usb_buf_mdm_out); + kfree(driver->write_ptr_mdm); + kfree(driver->usb_read_mdm_ptr); + destroy_workqueue(driver->diag_hsic_wq); + + driver->hsic_device_enabled = 0; +} diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h new file mode 100644 index 00000000000..6769052590b --- /dev/null +++ b/drivers/char/diag/diagfwd_hsic.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef DIAGFWD_HSIC_H +#define DIAGFWD_HSIC_H + +#include +#define N_MDM_WRITE 1 /* Upgrade to 2 with ping pong buffer */ +#define N_MDM_READ 1 + +void __init diagfwd_hsic_init(void); +void __exit diagfwd_hsic_exit(void); + +#endif diff --git a/drivers/char/diag/diagfwd_sdio.c b/drivers/char/diag/diagfwd_sdio.c index 84b662d9986..a145c0665e7 100644 --- a/drivers/char/diag/diagfwd_sdio.c +++ b/drivers/char/diag/diagfwd_sdio.c @@ -32,32 +32,19 @@ void __diag_sdio_send_req(void) { int r = 0; - void *buf = NULL; - int *in_busy_ptr = NULL; - struct diag_request *write_ptr_modem = NULL; - int retry = 0, type; + void *buf = driver->buf_in_sdio; - if (!driver->in_busy_sdio_1) { - buf = driver->buf_in_sdio_1; - write_ptr_modem = driver->write_ptr_mdm_1; - in_busy_ptr = &(driver->in_busy_sdio_1); - } else if (!driver->in_busy_sdio_2) { - buf = driver->buf_in_sdio_2; - write_ptr_modem = driver->write_ptr_mdm_2; - in_busy_ptr = &(driver->in_busy_sdio_2); - } - - if (driver->sdio_ch && buf) { + if (driver->sdio_ch && (!driver->in_busy_sdio)) { r = sdio_read_avail(driver->sdio_ch); if (r > IN_BUF_SIZE) { if (r < MAX_IN_BUF_SIZE) { pr_err("diag: SDIO sending" - " in packets more than %d bytes", r); + " packets more than %d bytes\n", r); buf = krealloc(buf, r, GFP_KERNEL); } else { pr_err("diag: SDIO sending" - " in packets more than %d bytes", MAX_IN_BUF_SIZE); + " in packets more than %d bytes\n", MAX_IN_BUF_SIZE); return; } } @@ -65,60 +52,20 @@ void __diag_sdio_send_req(void) if (!buf) printk(KERN_INFO "Out of diagmem for SDIO\n"); else { -drop: APPEND_DEBUG('i'); sdio_read(driver->sdio_ch, buf, r); - if ((driver->qxdm2sd_drop) && (driver->logging_mode == USB_MODE)) { - /*Drop the diag payload */ - DIAG_INFO("%s:Drop the diag payload :%d\n", __func__, retry); - print_hex_dump(KERN_DEBUG, "Drop Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - r = sdio_read_avail(driver->sdio_ch); - if (++retry > 20) { - driver->qxdm2sd_drop = 0; - return; - } - if (r) - goto drop; - else { - driver->qxdm2sd_drop = 0; - return; - } - } - APPEND_DEBUG('j'); - - if (diag9k_debug_mask) { - switch (diag9k_debug_mask) { - case DIAGLOG_MODE_HEAD: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - break; - case DIAGLOG_MODE_FULL: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(first 16 bytes)", DUMP_PREFIX_ADDRESS, 16, 1, buf, 16, 1); - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K(last 16 bytes) ", 16, 1, DUMP_PREFIX_ADDRESS, buf+r-16, 16, 1); - break; - default: - print_hex_dump(KERN_DEBUG, "Read Packet Data" - " from 9K ", DUMP_PREFIX_ADDRESS, 16, 1, buf, r, 1); - - } - } - - type = checkcmd_modem_epst(buf); - if (type) { - modem_to_userspace(buf, r, type, 1); + if (((!driver->usb_connected) && (driver-> + logging_mode == USB_MODE)) || (driver-> + logging_mode == NO_LOGGING_MODE)) { + /* Drop the diag payload */ + driver->in_busy_sdio = 0; return; } - - write_ptr_modem->length = r; - *in_busy_ptr = 1; + APPEND_DEBUG('j'); + driver->write_ptr_mdm->length = r; + driver->in_busy_sdio = 1; diag_device_write(buf, SDIO_DATA, - write_ptr_modem); - + driver->write_ptr_mdm); } } } @@ -129,6 +76,31 @@ static void diag_read_sdio_work_fn(struct work_struct *work) __diag_sdio_send_req(); } +static void diag_sdio_notify(void *ctxt, unsigned event) +{ + if (event == SDIO_EVENT_DATA_READ_AVAIL) + queue_work(driver->diag_sdio_wq, + 
&(driver->diag_read_sdio_work)); + + if (event == SDIO_EVENT_DATA_WRITE_AVAIL) + wake_up_interruptible(&driver->wait_q); +} + +static int diag_sdio_close(void) +{ + queue_work(driver->diag_sdio_wq, &(driver->diag_close_sdio_work)); + return 0; +} + +static void diag_close_sdio_work_fn(struct work_struct *work) +{ + pr_debug("diag: sdio close called\n"); + if (sdio_close(driver->sdio_ch)) + pr_err("diag: could not close SDIO channel\n"); + else + driver->sdio_ch = NULL; /* channel successfully closed */ +} + int diagfwd_connect_sdio(void) { int err; @@ -136,10 +108,20 @@ int diagfwd_connect_sdio(void) err = usb_diag_alloc_req(driver->mdm_ch, N_MDM_WRITE, N_MDM_READ); if (err) - printk(KERN_ERR "diag: unable to alloc USB req on mdm ch"); + pr_err("diag: unable to alloc USB req on mdm ch\n"); + + driver->in_busy_sdio = 0; + if (!driver->sdio_ch) { + err = sdio_open("SDIO_DIAG", &driver->sdio_ch, driver, + diag_sdio_notify); + if (err) + pr_info("diag: could not open SDIO channel\n"); + else + pr_info("diag: opened SDIO channel\n"); + } else { + pr_info("diag: SDIO channel already open\n"); + } - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; /* Poll USB channel to check for data*/ queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work)); /* Poll SDIO channel to check for data*/ @@ -149,18 +131,17 @@ int diagfwd_connect_sdio(void) int diagfwd_disconnect_sdio(void) { - /* driver->in_busy_sdio = 1; */ - /* Clear variable to Flush remaining data from SDIO channel */ - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; usb_diag_free_req(driver->mdm_ch); + if (driver->sdio_ch && (driver->logging_mode == USB_MODE)) { + driver->in_busy_sdio = 1; + diag_sdio_close(); + } return 0; } int diagfwd_write_complete_sdio(void) { - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; + driver->in_busy_sdio = 0; APPEND_DEBUG('q'); queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); return 0; @@ -174,12 +155,14 @@ int diagfwd_read_complete_sdio(void) void diag_read_mdm_work_fn(struct work_struct *work) { - if (diag9k_debug_mask) - DIAG_INFO("%s \n", __func__); - if (driver->sdio_ch) { - wait_event_interruptible(driver->wait_q, (sdio_write_avail - (driver->sdio_ch) >= driver->read_len_mdm)); + wait_event_interruptible(driver->wait_q, ((sdio_write_avail + (driver->sdio_ch) >= driver->read_len_mdm) || + !(driver->sdio_ch))); + if (!(driver->sdio_ch)) { + pr_alert("diag: sdio channel not valid"); + return; + } if (driver->sdio_ch && driver->usb_buf_mdm_out && (driver->read_len_mdm > 0)) sdio_write(driver->sdio_ch, driver->usb_buf_mdm_out, @@ -192,21 +175,9 @@ void diag_read_mdm_work_fn(struct work_struct *work) } } -static void diag_sdio_notify(void *ctxt, unsigned event) -{ - if (event == SDIO_EVENT_DATA_READ_AVAIL) - queue_work(driver->diag_sdio_wq, - &(driver->diag_read_sdio_work)); - - if (event == SDIO_EVENT_DATA_WRITE_AVAIL) - wake_up_interruptible(&driver->wait_q); -} - static int diag_sdio_probe(struct platform_device *pdev) { int err; - if (diag9k_debug_mask) - DIAG_INFO("%s\n", __func__); err = sdio_open("SDIO_DIAG", &driver->sdio_ch, driver, diag_sdio_notify); @@ -217,28 +188,15 @@ static int diag_sdio_probe(struct platform_device *pdev) queue_work(driver->diag_sdio_wq, &(driver->diag_read_mdm_work)); } - driver->in_busy_sdio_1 = 0; - driver->in_busy_sdio_2 = 0; - driver->qxdm2sd_drop = 0; - sdio_diag_initialized = 1; - return err; } static int diag_sdio_remove(struct platform_device *pdev) -{ - queue_work(driver->diag_sdio_wq, 
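diag_read_mdm_work_fn() above now wakes from wait_event_interruptible() either when sdio_write_avail() can accept read_len_mdm bytes or when driver->sdio_ch has been cleared, and bails out in the latter case instead of sleeping forever on a closed channel. Below is a hedged userspace analogue of that "space available or channel gone" wait using a condition variable (build with -pthread); all names, sizes, and the one-second delay are invented.

/* Wait until there is room to write -- or until the channel disappears. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  write_avail;          /* bytes the channel can accept right now */
static bool channel_open = true;

/* Stand-in for the side that either frees up space or closes the channel. */
static void *channel_thread(void *arg)
{
    (void)arg;
    sleep(1);
    pthread_mutex_lock(&lock);
    channel_open = false;          /* channel goes away instead of draining */
    pthread_cond_broadcast(&cond); /* wake the waiter in either case */
    pthread_mutex_unlock(&lock);
    return NULL;
}

static int wait_for_space(int need)
{
    pthread_mutex_lock(&lock);
    while (write_avail < need && channel_open)
        pthread_cond_wait(&cond, &lock);
    if (!channel_open) {
        pthread_mutex_unlock(&lock);
        fprintf(stderr, "channel not valid, aborting write\n");
        return -1;
    }
    pthread_mutex_unlock(&lock);
    return 0;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, channel_thread, NULL);
    wait_for_space(512);
    pthread_join(t, NULL);
    return 0;
}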
&(driver->diag_remove_sdio_work)); - return 0; -} - -static void diag_remove_sdio_work_fn(struct work_struct *work) { pr_debug("\n diag: sdio remove called"); - /*Disable SDIO channel to prevent further read/write */ + /* Disable SDIO channel to prevent further read/write */ driver->sdio_ch = NULL; - sdio_diag_initialized = 0; - driver->in_busy_sdio_1 = 1; - driver->in_busy_sdio_2 = 1; + return 0; } static int diagfwd_sdio_runtime_suspend(struct device *dev) @@ -272,31 +230,19 @@ void diagfwd_sdio_init(void) { int ret; - if (diag9k_debug_mask) - DIAG_INFO("%s\n", __func__); - driver->read_len_mdm = 0; - if (driver->buf_in_sdio_1 == NULL) - driver->buf_in_sdio_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); - if (driver->buf_in_sdio_1 == NULL) - goto err; - if (driver->buf_in_sdio_2 == NULL) - driver->buf_in_sdio_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); - if (driver->buf_in_sdio_2 == NULL) + if (driver->buf_in_sdio == NULL) + driver->buf_in_sdio = kzalloc(IN_BUF_SIZE, GFP_KERNEL); + if (driver->buf_in_sdio == NULL) goto err; if (driver->usb_buf_mdm_out == NULL) driver->usb_buf_mdm_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); if (driver->usb_buf_mdm_out == NULL) goto err; - if (driver->write_ptr_mdm_1 == NULL) - driver->write_ptr_mdm_1 = kzalloc( - sizeof(struct diag_request), GFP_KERNEL); - if (driver->write_ptr_mdm_1 == NULL) - goto err; - if (driver->write_ptr_mdm_2 == NULL) - driver->write_ptr_mdm_2 = kzalloc( + if (driver->write_ptr_mdm == NULL) + driver->write_ptr_mdm = kzalloc( sizeof(struct diag_request), GFP_KERNEL); - if (driver->write_ptr_mdm_2 == NULL) + if (driver->write_ptr_mdm == NULL) goto err; if (driver->usb_read_mdm_ptr == NULL) driver->usb_read_mdm_ptr = kzalloc( @@ -314,7 +260,7 @@ void diagfwd_sdio_init(void) INIT_WORK(&(driver->diag_read_mdm_work), diag_read_mdm_work_fn); #endif INIT_WORK(&(driver->diag_read_sdio_work), diag_read_sdio_work_fn); - INIT_WORK(&(driver->diag_remove_sdio_work), diag_remove_sdio_work_fn); + INIT_WORK(&(driver->diag_close_sdio_work), diag_close_sdio_work_fn); ret = platform_driver_register(&msm_sdio_ch_driver); if (ret) printk(KERN_INFO "DIAG could not register SDIO device"); @@ -324,11 +270,9 @@ void diagfwd_sdio_init(void) return; err: printk(KERN_INFO "\n Could not initialize diag buf for SDIO"); - kfree(driver->buf_in_sdio_1); - kfree(driver->buf_in_sdio_2); + kfree(driver->buf_in_sdio); kfree(driver->usb_buf_mdm_out); - kfree(driver->write_ptr_mdm_1); - kfree(driver->write_ptr_mdm_2); + kfree(driver->write_ptr_mdm); kfree(driver->usb_read_mdm_ptr); if (driver->diag_sdio_wq) destroy_workqueue(driver->diag_sdio_wq); @@ -344,11 +288,9 @@ void diagfwd_sdio_exit(void) #ifdef CONFIG_DIAG_OVER_USB usb_diag_close(driver->mdm_ch); #endif - kfree(driver->buf_in_sdio_1); - kfree(driver->buf_in_sdio_2); + kfree(driver->buf_in_sdio); kfree(driver->usb_buf_mdm_out); - kfree(driver->write_ptr_mdm_1); - kfree(driver->write_ptr_mdm_2); + kfree(driver->write_ptr_mdm); kfree(driver->usb_read_mdm_ptr); destroy_workqueue(driver->diag_sdio_wq); } diff --git a/drivers/char/diag/diagfwd_sdio.h b/drivers/char/diag/diagfwd_sdio.h index 57a35d29b95..40982c33783 100644 --- a/drivers/char/diag/diagfwd_sdio.h +++ b/drivers/char/diag/diagfwd_sdio.h @@ -14,7 +14,7 @@ #define DIAGFWD_SDIO_H #include -#define N_MDM_WRITE 2 /* Upgrade to 2 with ping pong buffer */ +#define N_MDM_WRITE 1 /* Upgrade to 2 with ping pong buffer */ #define N_MDM_READ 1 void diagfwd_sdio_init(void); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 34d6a1cab8d..853f8e89df3 100644 --- 
a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -374,26 +374,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) struct hpet_dev *devp; unsigned long addr; - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) - return -EINVAL; - devp = file->private_data; addr = devp->hd_hpets->hp_hpet_phys; if (addr & (PAGE_SIZE - 1)) return -ENOSYS; - vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) { - printk(KERN_ERR "%s: io_remap_pfn_range failed\n", - __func__); - return -EAGAIN; - } - - return 0; + return vm_iomap_memory(vma, addr, PAGE_SIZE); #else return -ENOSYS; #endif diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 2016aad8520..564f6c49243 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -40,6 +40,7 @@ #include #include #include +#include #include @@ -52,8 +53,12 @@ static struct hwrng *current_rng; static LIST_HEAD(rng_list); static DEFINE_MUTEX(rng_mutex); static int data_avail; -static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES] - __cacheline_aligned; +static u8 *rng_buffer; + +static size_t rng_buffer_size(void) +{ + return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; +} static inline int hwrng_init(struct hwrng *rng) { @@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf, if (!data_avail) { bytes_read = rng_get_data(current_rng, rng_buffer, - sizeof(rng_buffer), + rng_buffer_size(), !(filp->f_flags & O_NONBLOCK)); if (bytes_read < 0) { err = bytes_read; @@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng) mutex_lock(&rng_mutex); + /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ + err = -ENOMEM; + if (!rng_buffer) { + rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); + if (!rng_buffer) + goto out_unlock; + } + /* Must not register two RNGs with the same name. */ err = -EEXIST; list_for_each_entry(tmp, &rng_list, list) { diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 75f1cbd61c1..ca7570df05a 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -88,14 +88,22 @@ static int virtrng_probe(struct virtio_device *vdev) { int err; + if (vq) { + /* We only support one device for now */ + return -EBUSY; + } /* We expect a single virtqueue. 
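In the hw_random core the static __cacheline_aligned rng_buffer becomes a kmalloc'ed buffer allocated lazily under rng_mutex on the first hwrng_register(), which (per the comment in the patch) keeps it safe for virt_to_page() in virtio-rng. The userspace sketch below shows only the "allocate once, under the registration lock" shape of that change; names and the fixed size stand in for the real ones.

/* Lazy, lock-protected allocation of a buffer shared by all backends. */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char *demo_buffer;

static size_t demo_buffer_size(void)
{
    return 64;    /* stands in for max(SMP_CACHE_BYTES, 32) */
}

static int demo_register(const char *name)
{
    int err = 0;

    pthread_mutex_lock(&demo_mutex);
    if (!demo_buffer) {                     /* first registration allocates */
        demo_buffer = malloc(demo_buffer_size());
        if (!demo_buffer)
            err = -1;
    }
    pthread_mutex_unlock(&demo_mutex);

    if (!err)
        printf("registered %s, buffer %zu bytes\n", name, demo_buffer_size());
    return err;
}

int main(void)
{
    demo_register("rng-a");
    demo_register("rng-b");    /* second registration reuses the buffer */
    free(demo_buffer);
    return 0;
}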
*/ vq = virtio_find_single_vq(vdev, random_recv_done, "input"); - if (IS_ERR(vq)) - return PTR_ERR(vq); + if (IS_ERR(vq)) { + err = PTR_ERR(vq); + vq = NULL; + return err; + } err = hwrng_register(&virtio_hwrng); if (err) { vdev->config->del_vqs(vdev); + vq = NULL; return err; } @@ -107,6 +115,7 @@ static void __devexit virtrng_remove(struct virtio_device *vdev) vdev->config->reset(vdev); hwrng_unregister(&virtio_hwrng); vdev->config->del_vqs(vdev); + vq = NULL; } static struct virtio_device_id id_table[] = { diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index 3ed20e8abc0..92ce3026882 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -95,9 +95,9 @@ struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char write_data[IPMI_MAX_MSG_LENGTH]; + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH]; + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 2aa3977aae5..8dde1f58ef6 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -838,13 +838,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return ipmi_ioctl(filep, cmd, arg); } } + +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = compat_ipmi_ioctl(filep, cmd, arg); + mutex_unlock(&ipmi_mutex); + + return ret; +} #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = compat_ipmi_ioctl, + .compat_ioctl = unlocked_compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, diff --git a/drivers/char/msm_rotator.c b/drivers/char/msm_rotator.c index 38ed23fb65e..ec0e28c65f1 100644 --- a/drivers/char/msm_rotator.c +++ b/drivers/char/msm_rotator.c @@ -43,6 +43,7 @@ #define MSM_ROTATOR_START (MSM_ROTATOR_BASE+0x0030) #define MSM_ROTATOR_MAX_BURST_SIZE (MSM_ROTATOR_BASE+0x0050) #define MSM_ROTATOR_HW_VERSION (MSM_ROTATOR_BASE+0x0070) +#define MSM_ROTATOR_SW_RESET (MSM_ROTATOR_BASE+0x0074) #define MSM_ROTATOR_SRC_SIZE (MSM_ROTATOR_BASE+0x1108) #define MSM_ROTATOR_SRCP0_ADDR (MSM_ROTATOR_BASE+0x110c) #define MSM_ROTATOR_SRCP1_ADDR (MSM_ROTATOR_BASE+0x1110) @@ -118,12 +119,19 @@ struct msm_rotator_mem_planes { #define checkoffset(offset, size, max_size) \ ((size) > (max_size) || (offset) > ((max_size) - (size))) +struct msm_rotator_fd_info { + int pid; + int ref_cnt; + struct list_head list; +}; + struct msm_rotator_dev { void __iomem *io_base; int irq; struct msm_rotator_img_info *img_info[MAX_SESSIONS]; struct clk *core_clk; - int pid_list[MAX_SESSIONS]; + struct msm_rotator_fd_info *fd_info[MAX_SESSIONS]; + struct list_head fd_list; struct clk *pclk; int rot_clk_state; struct regulator *regulator; @@ -854,11 +862,11 @@ static int msm_rotator_do_rotate(unsigned long arg) break; if (s == MAX_SESSIONS) { - dev_dbg(msm_rotator_dev->device, - "%s() : Attempt to use invalid session_id %d\n", + pr_err("%s() : Attempt to use invalid session_id %d\n", __func__, s); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } if (msm_rotator_dev->img_info[s]->enable == 
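The IPMI change above routes 32-bit callers through unlocked_compat_ipmi_ioctl(), which takes ipmi_mutex around the existing compat handler so the compat path gets the same serialization as the native ioctl. A generic kernel-style sketch of that wrapper pattern, with hypothetical demo_* names, could look like this:

/* Sketch only: both the native and the compat ioctl entry points funnel
 * through the same mutex-protected handler. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);

static long demo_ioctl_locked(struct file *filp, unsigned int cmd,
                              unsigned long arg)
{
    /* translate user structures and do the real work here */
    return 0;
}

static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
    long ret;

    mutex_lock(&demo_mutex);
    ret = demo_ioctl_locked(filp, cmd, arg);
    mutex_unlock(&demo_mutex);
    return ret;
}

#ifdef CONFIG_COMPAT
/* 32-bit callers on a 64-bit kernel take the same lock. */
static long demo_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg)
{
    return demo_unlocked_ioctl(filp, cmd, arg);
}
#endif

static const struct file_operations demo_fops = {
    .owner          = THIS_MODULE,
    .unlocked_ioctl = demo_unlocked_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = demo_compat_ioctl,
#endif
};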
0) { @@ -866,7 +874,8 @@ static int msm_rotator_do_rotate(unsigned long arg) "%s() : Session_id %d not enabled \n", __func__, s); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } img_info = msm_rotator_dev->img_info[s]; @@ -876,7 +885,8 @@ static int msm_rotator_do_rotate(unsigned long arg) &src_planes)) { pr_err("%s: invalid src format\n", __func__); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } if (msm_rotator_get_plane_sizes(img_info->dst.format, img_info->dst.width, @@ -884,7 +894,8 @@ static int msm_rotator_do_rotate(unsigned long arg) &dst_planes)) { pr_err("%s: invalid dst format\n", __func__); rc = -EINVAL; - goto do_rotate_unlock_mutex; + mutex_unlock(&msm_rotator_dev->rotator_lock); + return rc; } rc = get_img(&info.src, (unsigned long *)&in_paddr, @@ -1073,11 +1084,13 @@ static int msm_rotator_do_rotate(unsigned long arg) break; default: rc = -EINVAL; + pr_err("%s(): Unsupported format %u\n", __func__, format); goto do_rotate_exit; } if (rc != 0) { msm_rotator_dev->last_session_idx = INVALID_SESSION; + pr_err("%s(): Invalid session error\n", __func__); goto do_rotate_exit; } @@ -1089,8 +1102,11 @@ static int msm_rotator_do_rotate(unsigned long arg) wait_event(msm_rotator_dev->wq, (msm_rotator_dev->processing == 0)); status = (unsigned char)ioread32(MSM_ROTATOR_INTR_STATUS); - if ((status & 0x03) != 0x01) + if ((status & 0x03) != 0x01) { + pr_err("%s(): AXI Bus Error, issuing SW_RESET\n", __func__); + iowrite32(0x1, MSM_ROTATOR_SW_RESET); rc = -EFAULT; + } iowrite32(0, MSM_ROTATOR_INTR_ENABLE); iowrite32(3, MSM_ROTATOR_INTR_CLEAR); @@ -1111,31 +1127,12 @@ static int msm_rotator_do_rotate(unsigned long arg) return rc; } -static void msm_rotator_set_perf_level(u32 wh, u32 is_rgb) -{ - u32 perf_level; - - if (is_rgb) - perf_level = 1; - else if (wh <= (640 * 480)) - perf_level = 2; - else if (wh <= (736 * 1280)) - perf_level = 3; - else - perf_level = 4; - -#ifdef CONFIG_MSM_BUS_SCALING - msm_bus_scale_client_update_request(msm_rotator_dev->bus_client_handle, - perf_level); -#endif - -} - -static int msm_rotator_start(unsigned long arg, int pid) +static int msm_rotator_start(unsigned long arg, + struct msm_rotator_fd_info *fd_info) { struct msm_rotator_img_info info; int rc = 0; - int s, is_rgb = 0; + int s; int first_free_index = INVALID_SESSION; unsigned int dst_w, dst_h; @@ -1177,7 +1174,6 @@ static int msm_rotator_start(unsigned long arg, int pid) case MDP_XRGB_8888: case MDP_RGBX_8888: case MDP_BGRA_8888: - is_rgb = 1; info.dst.format = info.src.format; break; case MDP_Y_CBCR_H2V2: @@ -1204,15 +1200,13 @@ static int msm_rotator_start(unsigned long arg, int pid) mutex_lock(&msm_rotator_dev->rotator_lock); - msm_rotator_set_perf_level((info.src.width*info.src.height), is_rgb); - for (s = 0; s < MAX_SESSIONS; s++) { if ((msm_rotator_dev->img_info[s] != NULL) && (info.session_id == (unsigned int)msm_rotator_dev->img_info[s] )) { *(msm_rotator_dev->img_info[s]) = info; - msm_rotator_dev->pid_list[s] = pid; + msm_rotator_dev->fd_info[s] = fd_info; if (msm_rotator_dev->last_session_idx == s) msm_rotator_dev->last_session_idx = @@ -1240,16 +1234,16 @@ static int msm_rotator_start(unsigned long arg, int pid) info.session_id = (unsigned int) msm_rotator_dev->img_info[first_free_index]; *(msm_rotator_dev->img_info[first_free_index]) = info; - msm_rotator_dev->pid_list[first_free_index] = pid; - - if (copy_to_user((void __user *)arg, &info, sizeof(info))) - rc = -EFAULT; 
+ msm_rotator_dev->fd_info[first_free_index] = fd_info; } else if (s == MAX_SESSIONS) { dev_dbg(msm_rotator_dev->device, "%s: all sessions in use\n", __func__); rc = -EBUSY; } + if (rc == 0 && copy_to_user((void __user *)arg, &info, sizeof(info))) + rc = -EFAULT; + rotator_start_exit: mutex_unlock(&msm_rotator_dev->rotator_lock); @@ -1275,7 +1269,7 @@ static int msm_rotator_finish(unsigned long arg) INVALID_SESSION; kfree(msm_rotator_dev->img_info[s]); msm_rotator_dev->img_info[s] = NULL; - msm_rotator_dev->pid_list[s] = 0; + msm_rotator_dev->fd_info[s] = NULL; break; } } @@ -1293,24 +1287,45 @@ static int msm_rotator_finish(unsigned long arg) static int msm_rotator_open(struct inode *inode, struct file *filp) { - int *id; + struct msm_rotator_fd_info *tmp, *fd_info = NULL; int i; if (filp->private_data) return -EBUSY; mutex_lock(&msm_rotator_dev->rotator_lock); - id = &msm_rotator_dev->pid_list[0]; - for (i = 0; i < MAX_SESSIONS; i++, id++) { - if (*id == 0) + for (i = 0; i < MAX_SESSIONS; i++) { + if (msm_rotator_dev->fd_info[i] == NULL) break; } - mutex_unlock(&msm_rotator_dev->rotator_lock); - if (i == MAX_SESSIONS) + if (i == MAX_SESSIONS) { + mutex_unlock(&msm_rotator_dev->rotator_lock); return -EBUSY; + } + + list_for_each_entry(tmp, &msm_rotator_dev->fd_list, list) { + if (tmp->pid == current->pid) { + fd_info = tmp; + break; + } + } - filp->private_data = (void *)current->pid; + if (!fd_info) { + fd_info = kzalloc(sizeof(*fd_info), GFP_KERNEL); + if (!fd_info) { + mutex_unlock(&msm_rotator_dev->rotator_lock); + pr_err("%s: insufficient memory to alloc resources\n", + __func__); + return -ENOMEM; + } + list_add(&fd_info->list, &msm_rotator_dev->fd_list); + fd_info->pid = current->pid; + } + fd_info->ref_cnt++; + mutex_unlock(&msm_rotator_dev->rotator_lock); + + filp->private_data = fd_info; return 0; } @@ -1318,21 +1333,33 @@ msm_rotator_open(struct inode *inode, struct file *filp) static int msm_rotator_close(struct inode *inode, struct file *filp) { + struct msm_rotator_fd_info *fd_info; int s; - int pid; - pid = (int)filp->private_data; + fd_info = (struct msm_rotator_fd_info *)filp->private_data; + mutex_lock(&msm_rotator_dev->rotator_lock); + if (--fd_info->ref_cnt > 0) { + mutex_unlock(&msm_rotator_dev->rotator_lock); + return 0; + } + for (s = 0; s < MAX_SESSIONS; s++) { if (msm_rotator_dev->img_info[s] != NULL && - msm_rotator_dev->pid_list[s] == pid) { + msm_rotator_dev->fd_info[s] == fd_info) { + pr_debug("%s: freeing rotator session %p (pid %d)\n", + __func__, msm_rotator_dev->img_info[s], + fd_info->pid); kfree(msm_rotator_dev->img_info[s]); msm_rotator_dev->img_info[s] = NULL; + msm_rotator_dev->fd_info[s] = NULL; if (msm_rotator_dev->last_session_idx == s) msm_rotator_dev->last_session_idx = INVALID_SESSION; } } + list_del(&fd_info->list); + kfree(fd_info); mutex_unlock(&msm_rotator_dev->rotator_lock); return 0; @@ -1341,16 +1368,16 @@ msm_rotator_close(struct inode *inode, struct file *filp) static long msm_rotator_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - int pid; + struct msm_rotator_fd_info *fd_info; if (_IOC_TYPE(cmd) != MSM_ROTATOR_IOCTL_MAGIC) return -ENOTTY; - pid = (int)file->private_data; + fd_info = (struct msm_rotator_fd_info *)file->private_data; switch (cmd) { case MSM_ROTATOR_IOCTL_START: - return msm_rotator_start(arg, pid); + return msm_rotator_start(arg, fd_info); case MSM_ROTATOR_IOCTL_ROTATE: return msm_rotator_do_rotate(arg); case MSM_ROTATOR_IOCTL_FINISH: @@ -1393,6 +1420,7 @@ static int __devinit msm_rotator_probe(struct 
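msm_rotator_open()/close() above replace the bare pid_list with a refcounted msm_rotator_fd_info looked up by current->pid on a driver-wide list, so all sessions owned by a process can be cleaned up when its last file descriptor goes away. The fragment below sketches just that lookup-or-allocate plus reference counting; it uses the same list_head/mutex kernel APIs as the patch but hypothetical demo_* names.

/* Per-process bookkeeping: one refcounted node per PID on a global list. */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_fd_info {
    int pid;
    int ref_cnt;
    struct list_head list;
};

static LIST_HEAD(demo_fd_list);
static DEFINE_MUTEX(demo_lock);

/* Called from open(): find the caller's node or create it, take a ref. */
static struct demo_fd_info *demo_get_fd_info(int pid)
{
    struct demo_fd_info *tmp, *fd_info = NULL;

    mutex_lock(&demo_lock);
    list_for_each_entry(tmp, &demo_fd_list, list) {
        if (tmp->pid == pid) {
            fd_info = tmp;
            break;
        }
    }
    if (!fd_info) {
        fd_info = kzalloc(sizeof(*fd_info), GFP_KERNEL);
        if (!fd_info) {
            mutex_unlock(&demo_lock);
            return NULL;
        }
        fd_info->pid = pid;
        list_add(&fd_info->list, &demo_fd_list);
    }
    fd_info->ref_cnt++;                 /* one reference per open() */
    mutex_unlock(&demo_lock);
    return fd_info;
}

/* Called from close(): drop a ref, free on the last close from that PID. */
static void demo_put_fd_info(struct demo_fd_info *fd_info)
{
    mutex_lock(&demo_lock);
    if (--fd_info->ref_cnt == 0) {
        list_del(&fd_info->list);
        kfree(fd_info);
    }
    mutex_unlock(&demo_lock);
}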
platform_device *pdev) msm_rotator_dev->imem_owner = IMEM_NO_OWNER; mutex_init(&msm_rotator_dev->imem_lock); + INIT_LIST_HEAD(&msm_rotator_dev->fd_list); msm_rotator_dev->imem_clk_state = CLK_DIS; INIT_DELAYED_WORK(&msm_rotator_dev->imem_clk_work, msm_rotator_imem_clk_work_f); diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 25d139c9dbe..579051ce854 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, vdata->flags = flags; vdata->type = type; spin_lock_init(&vdata->lock); - vdata->refcnt = ATOMIC_INIT(1); + atomic_set(&vdata->refcnt, 1); vma->vm_private_data = vdata; vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND); diff --git a/drivers/char/random.c b/drivers/char/random.c index c35a785005b..fceac955bfb 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -125,21 +125,26 @@ * The current exported interfaces for gathering environmental noise * from the devices are: * + * void add_device_randomness(const void *buf, unsigned int size); * void add_input_randomness(unsigned int type, unsigned int code, * unsigned int value); - * void add_interrupt_randomness(int irq); + * void add_interrupt_randomness(int irq, int irq_flags); * void add_disk_randomness(struct gendisk *disk); * + * add_device_randomness() is for adding data to the random pool that + * is likely to differ between two devices (or possibly even per boot). + * This would be things like MAC addresses or serial numbers, or the + * read-out of the RTC. This does *not* add any actual entropy to the + * pool, but it initializes the pool to different values for devices + * that might otherwise be identical and have very little entropy + * available to them (particularly common in the embedded world). + * * add_input_randomness() uses the input layer interrupt timing, as well as * the event type information from the hardware. * - * add_interrupt_randomness() uses the inter-interrupt timing as random - * inputs to the entropy pool. Note that not all interrupts are good - * sources of randomness! For example, the timer interrupts is not a - * good choice, because the periodicity of the interrupts is too - * regular, and hence predictable to an attacker. Network Interface - * Controller interrupts are a better measure, since the timing of the - * NIC interrupts are more unpredictable. + * add_interrupt_randomness() uses the interrupt timing as random + * inputs to the entropy pool. Using the cycle counters and the irq source + * as inputs, it feeds the randomness roughly once a second. * * add_disk_randomness() uses what amounts to the seek time of block * layer request events, on a per-disk_devt basis, as input to the @@ -248,6 +253,8 @@ #include #include #include +#include +#include #ifdef CONFIG_GENERIC_HARDIRQS # include @@ -256,8 +263,12 @@ #include #include #include +#include #include +#define CREATE_TRACE_POINTS +#include + /* * Configuration information */ @@ -266,6 +277,8 @@ #define SEC_XFER_SIZE 512 #define EXTRACT_SIZE 10 +#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) + /* * The minimum number of bits of entropy before we wake up a read on * /dev/random. Should be enough to do a significant reseed. 
@@ -420,8 +433,10 @@ struct entropy_store { /* read-write data: */ spinlock_t lock; unsigned add_ptr; + unsigned input_rotate; int entropy_count; - int input_rotate; + int entropy_total; + unsigned int initialized:1; __u8 last_data[EXTRACT_SIZE]; }; @@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = { .pool = nonblocking_pool_data }; +static __u32 const twist_table[8] = { + 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, + 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; + /* * This function adds bytes into the entropy "pool". It does not * update the entropy estimate. The caller should call @@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = { * it's cheap to do so and helps slightly in the expected case where * the entropy is concentrated in the low-order bits. */ -static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, - int nbytes, __u8 out[64]) +static void _mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes, __u8 out[64]) { - static __u32 const twist_table[8] = { - 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, - 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; unsigned long i, j, tap1, tap2, tap3, tap4, tap5; int input_rotate; int wordmask = r->poolinfo->poolwords - 1; const char *bytes = in; __u32 w; - unsigned long flags; - /* Taps are constant, so we can load them without holding r->lock. */ tap1 = r->poolinfo->tap1; tap2 = r->poolinfo->tap2; tap3 = r->poolinfo->tap3; tap4 = r->poolinfo->tap4; tap5 = r->poolinfo->tap5; - spin_lock_irqsave(&r->lock, flags); - input_rotate = r->input_rotate; - i = r->add_ptr; + smp_rmb(); + input_rotate = ACCESS_ONCE(r->input_rotate); + i = ACCESS_ONCE(r->add_ptr); /* mix one byte at a time to simplify size handling and churn faster */ while (nbytes--) { @@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, input_rotate += i ? 7 : 14; } - r->input_rotate = input_rotate; - r->add_ptr = i; + ACCESS_ONCE(r->input_rotate) = input_rotate; + ACCESS_ONCE(r->add_ptr) = i; + smp_wmb(); if (out) for (j = 0; j < 16; j++) ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; +} +static void __mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes, __u8 out[64]) +{ + trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_); + _mix_pool_bytes(r, in, nbytes, out); +} + +static void mix_pool_bytes(struct entropy_store *r, const void *in, + int nbytes, __u8 out[64]) +{ + unsigned long flags; + + trace_mix_pool_bytes(r->name, nbytes, _RET_IP_); + spin_lock_irqsave(&r->lock, flags); + _mix_pool_bytes(r, in, nbytes, out); spin_unlock_irqrestore(&r->lock, flags); } -static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) +struct fast_pool { + __u32 pool[4]; + unsigned long last; + unsigned short count; + unsigned char rotate; + unsigned char last_timer_intr; +}; + +/* + * This is a fast mixing routine used by the interrupt randomness + * collector. It's hardcoded for an 128 bit pool and assumes that any + * locks that might be needed are taken by the caller. + */ +static void fast_mix(struct fast_pool *f, const void *in, int nbytes) { - mix_pool_bytes_extract(r, in, bytes, NULL); + const char *bytes = in; + __u32 w; + unsigned i = f->count; + unsigned input_rotate = f->rotate; + + while (nbytes--) { + w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^ + f->pool[(i + 1) & 3]; + f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7]; + input_rotate += (i++ & 3) ? 
7 : 14; + } + f->count = i; + f->rotate = input_rotate; } /* @@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { - unsigned long flags; - int entropy_count; + int entropy_count, orig; if (!nbits) return; - spin_lock_irqsave(&r->lock, flags); - DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); - entropy_count = r->entropy_count; +retry: + entropy_count = orig = ACCESS_ONCE(r->entropy_count); entropy_count += nbits; + if (entropy_count < 0) { DEBUG_ENT("negative entropy/overflow\n"); entropy_count = 0; } else if (entropy_count > r->poolinfo->POOLBITS) entropy_count = r->poolinfo->POOLBITS; - r->entropy_count = entropy_count; + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; + + if (!r->initialized && nbits > 0) { + r->entropy_total += nbits; + if (r->entropy_total > 128) + r->initialized = 1; + } + + trace_credit_entropy_bits(r->name, nbits, entropy_count, + r->entropy_total, _RET_IP_); /* should we wake readers? */ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } - spin_unlock_irqrestore(&r->lock, flags); } /********************************************************************* @@ -572,42 +636,24 @@ struct timer_rand_state { unsigned dont_count_entropy:1; }; -#ifndef CONFIG_GENERIC_HARDIRQS - -static struct timer_rand_state *irq_timer_state[NR_IRQS]; - -static struct timer_rand_state *get_timer_rand_state(unsigned int irq) -{ - return irq_timer_state[irq]; -} - -static void set_timer_rand_state(unsigned int irq, - struct timer_rand_state *state) -{ - irq_timer_state[irq] = state; -} - -#else - -static struct timer_rand_state *get_timer_rand_state(unsigned int irq) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - return desc->timer_rand_state; -} - -static void set_timer_rand_state(unsigned int irq, - struct timer_rand_state *state) +/* + * Add device- or boot-specific data to the input and nonblocking + * pools to help initialize them to unique values. + * + * None of this adds any entropy, it is meant to avoid the + * problem of the nonblocking pool having similar initial state + * across largely identical devices. + */ +void add_device_randomness(const void *buf, unsigned int size) { - struct irq_desc *desc; - - desc = irq_to_desc(irq); + unsigned long time = get_cycles() ^ jiffies; - desc->timer_rand_state = state; + mix_pool_bytes(&input_pool, buf, size, NULL); + mix_pool_bytes(&input_pool, &time, sizeof(time), NULL); + mix_pool_bytes(&nonblocking_pool, buf, size, NULL); + mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL); } -#endif +EXPORT_SYMBOL(add_device_randomness); static struct timer_rand_state input_timer_state; @@ -624,8 +670,8 @@ static struct timer_rand_state input_timer_state; static void add_timer_randomness(struct timer_rand_state *state, unsigned num) { struct { - cycles_t cycles; long jiffies; + unsigned cycles; unsigned num; } sample; long delta, delta2, delta3; @@ -639,7 +685,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) sample.jiffies = jiffies; sample.cycles = get_cycles(); sample.num = num; - mix_pool_bytes(&input_pool, &sample, sizeof(sample)); + mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL); /* * Calculate number of bits of randomness we probably added. 
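credit_entropy_bits() above drops the pool spinlock in favour of an ACCESS_ONCE read plus a cmpxchg() retry loop that clamps the count to [0, POOLBITS]. The userspace sketch below mirrors that shape with C11 atomics (build with a C11 compiler); the pool size and the function name are illustrative, not the kernel's.

/* Lock-free "read, clamp, compare-and-swap, retry" accounting. */
#include <stdatomic.h>
#include <stdio.h>

#define POOLBITS 4096

static _Atomic int entropy_count;

static void credit_entropy_bits(int nbits)
{
    int orig, new;

    if (!nbits)
        return;

    do {
        orig = atomic_load(&entropy_count);
        new = orig + nbits;
        if (new < 0)               /* negative entropy / overflow: clamp */
            new = 0;
        else if (new > POOLBITS)
            new = POOLBITS;
        /* retry if another thread changed the count meanwhile */
    } while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
}

int main(void)
{
    credit_entropy_bits(64);
    credit_entropy_bits(-8);
    printf("entropy_count = %d bits\n", atomic_load(&entropy_count));
    return 0;
}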
@@ -696,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code, } EXPORT_SYMBOL_GPL(add_input_randomness); -void add_interrupt_randomness(int irq) +static DEFINE_PER_CPU(struct fast_pool, irq_randomness); + +void add_interrupt_randomness(int irq, int irq_flags) { - struct timer_rand_state *state; + struct entropy_store *r; + struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness); + struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + __u32 input[4], cycles = get_cycles(); + + input[0] = cycles ^ jiffies; + input[1] = irq; + if (regs) { + __u64 ip = instruction_pointer(regs); + input[2] = ip; + input[3] = ip >> 32; + } - state = get_timer_rand_state(irq); + fast_mix(fast_pool, input, sizeof(input)); - if (state == NULL) + if ((fast_pool->count & 1023) && + !time_after(now, fast_pool->last + HZ)) return; - DEBUG_ENT("irq event %d\n", irq); - add_timer_randomness(state, 0x100 + irq); + fast_pool->last = now; + + r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; + __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); + /* + * If we don't have a valid cycle counter, and we see + * back-to-back timer interrupts, then skip giving credit for + * any entropy. + */ + if (cycles == 0) { + if (irq_flags & __IRQF_TIMER) { + if (fast_pool->last_timer_intr) + return; + fast_pool->last_timer_intr = 1; + } else + fast_pool->last_timer_intr = 0; + } + credit_entropy_bits(r, 1); } #ifdef CONFIG_BLOCK @@ -738,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, */ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) { - __u32 tmp[OUTPUT_POOL_WORDS]; + __u32 tmp[OUTPUT_POOL_WORDS]; if (r->pull && r->entropy_count < nbytes * 8 && r->entropy_count < r->poolinfo->POOLBITS) { @@ -757,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) bytes = extract_entropy(r->pull, tmp, bytes, random_read_wakeup_thresh / 8, rsvd); - mix_pool_bytes(r, tmp, bytes); + mix_pool_bytes(r, tmp, bytes, NULL); credit_entropy_bits(r, bytes*8); } } @@ -816,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, static void extract_buf(struct entropy_store *r, __u8 *out) { int i; - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; + union { + __u32 w[5]; + unsigned long l[LONGS(EXTRACT_SIZE)]; + } hash; + __u32 workspace[SHA_WORKSPACE_WORDS]; __u8 extract[64]; + unsigned long flags; /* Generate a hash across the pool, 16 words (512 bits) at a time */ - sha_init(hash); + sha_init(hash.w); + spin_lock_irqsave(&r->lock, flags); for (i = 0; i < r->poolinfo->poolwords; i += 16) - sha_transform(hash, (__u8 *)(r->pool + i), workspace); + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); /* * We mix the hash back into the pool to prevent backtracking @@ -833,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out) * brute-forcing the feedback as hard as brute-forcing the * hash. */ - mix_pool_bytes_extract(r, hash, sizeof(hash), extract); + __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract); + spin_unlock_irqrestore(&r->lock, flags); /* * To avoid duplicates, we atomically extract a portion of the * pool while mixing, and hash one final time. */ - sha_transform(hash, extract, workspace); + sha_transform(hash.w, extract, workspace); memset(extract, 0, sizeof(extract)); memset(workspace, 0, sizeof(workspace)); @@ -848,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out) * pattern, we fold it in half. 
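add_interrupt_randomness() above feeds a tiny per-CPU fast_pool through fast_mix() and only spills into the main pool about once a second. The standalone program below re-creates fast_mix() so the mixing step can be run and inspected outside the kernel: the twist table and the arithmetic are taken from the patch, the struct is trimmed to the fields the mixer touches, the sample input is made up, and the zero-shift guard in rol32() is only there to keep the userspace build well defined.

/* Userspace re-creation of the interrupt fast-mix step, for inspection. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t twist_table[8] = {
    0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
    0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

struct fast_pool {              /* trimmed to what fast_mix() uses */
    uint32_t pool[4];
    unsigned short count;
    unsigned char rotate;
};

static uint32_t rol32(uint32_t word, unsigned int shift)
{
    return shift ? (word << shift) | (word >> (32 - shift)) : word;
}

static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
    const char *bytes = in;     /* plain char, as in the patch */
    uint32_t w;
    unsigned i = f->count;
    unsigned input_rotate = f->rotate;

    while (nbytes--) {
        w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
            f->pool[(i + 1) & 3];
        f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
        input_rotate += (i++ & 3) ? 7 : 14;
    }
    f->count = i;
    f->rotate = input_rotate;
}

int main(void)
{
    struct fast_pool f;
    uint32_t sample[4] = { 0x12345678, 19, 0xdeadbeef, 0 }; /* cycles, irq, ip */

    memset(&f, 0, sizeof(f));
    fast_mix(&f, sample, sizeof(sample));
    printf("pool: %08x %08x %08x %08x (count=%u)\n",
           f.pool[0], f.pool[1], f.pool[2], f.pool[3], (unsigned)f.count);
    return 0;
}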
Thus, we always feed back * twice as much data as we output. */ - hash[0] ^= hash[3]; - hash[1] ^= hash[4]; - hash[2] ^= rol32(hash[2], 16); - memcpy(out, hash, EXTRACT_SIZE); - memset(hash, 0, sizeof(hash)); + hash.w[0] ^= hash.w[3]; + hash.w[1] ^= hash.w[4]; + hash.w[2] ^= rol32(hash.w[2], 16); + + /* + * If we have a architectural hardware random number + * generator, mix that in, too. + */ + for (i = 0; i < LONGS(EXTRACT_SIZE); i++) { + unsigned long v; + if (!arch_get_random_long(&v)) + break; + hash.l[i] ^= v; + } + + memcpy(out, &hash, EXTRACT_SIZE); + memset(&hash, 0, sizeof(hash)); } static ssize_t extract_entropy(struct entropy_store *r, void *buf, - size_t nbytes, int min, int reserved) + size_t nbytes, int min, int reserved) { ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; - unsigned long flags; + trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_); xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, min, reserved); @@ -869,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, extract_buf(r, tmp); if (fips_enabled) { + unsigned long flags; + spin_lock_irqsave(&r->lock, flags); if (!memcmp(tmp, r->last_data, EXTRACT_SIZE)) panic("Hardware RNG duplicated output!\n"); @@ -894,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, ssize_t ret = 0, i; __u8 tmp[EXTRACT_SIZE]; + trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_); xfer_secondary_pool(r, nbytes); nbytes = account(r, nbytes, 0, 0); @@ -927,8 +1026,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, /* * This function is the exported kernel interface. It returns some - * number of good random numbers, suitable for seeding TCP sequence - * numbers, etc. + * number of good random numbers, suitable for key generation, seeding + * TCP sequence numbers, etc. It does not use the hw random number + * generator, if available; use get_random_bytes_arch() for that. */ void get_random_bytes(void *buf, int nbytes) { @@ -936,6 +1036,39 @@ void get_random_bytes(void *buf, int nbytes) } EXPORT_SYMBOL(get_random_bytes); +/* + * This function will use the architecture-specific hardware random + * number generator if it is available. The arch-specific hw RNG will + * almost certainly be faster than what we can do in software, but it + * is impossible to verify that it is implemented securely (as + * opposed, to, say, the AES encryption of a sequence number using a + * key known by the NSA). So it's useful if we need the speed, but + * only if we're willing to trust the hardware manufacturer not to + * have put in a back door. 
+ */ +void get_random_bytes_arch(void *buf, int nbytes) +{ + char *p = buf; + + trace_get_random_bytes(nbytes, _RET_IP_); + while (nbytes) { + unsigned long v; + int chunk = min(nbytes, (int)sizeof(unsigned long)); + + if (!arch_get_random_long(&v)) + break; + + memcpy(p, &v, chunk); + p += chunk; + nbytes -= chunk; + } + + if (nbytes) + extract_entropy(&nonblocking_pool, p, nbytes, 0, 0); +} +EXPORT_SYMBOL(get_random_bytes_arch); + + /* * init_std_data - initialize pool with system data * @@ -947,18 +1080,31 @@ EXPORT_SYMBOL(get_random_bytes); */ static void init_std_data(struct entropy_store *r) { - ktime_t now; - unsigned long flags; + int i; + ktime_t now = ktime_get_real(); + unsigned long rv; - spin_lock_irqsave(&r->lock, flags); r->entropy_count = 0; - spin_unlock_irqrestore(&r->lock, flags); - - now = ktime_get_real(); - mix_pool_bytes(r, &now, sizeof(now)); - mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); + r->entropy_total = 0; + mix_pool_bytes(r, &now, sizeof(now), NULL); + for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) { + if (!arch_get_random_long(&rv)) + break; + mix_pool_bytes(r, &rv, sizeof(rv), NULL); + } + mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL); } +/* + * Note that setup_arch() may call add_device_randomness() + * long before we get here. This allows seeding of the pools + * with some platform dependent data very early in the boot + * process. But it limits our options here. We must use + * statically allocated structures that already have all + * initializations complete at compile time. We should also + * take care not to overwrite the precious per platform data + * we were given. + */ static int rand_initialize(void) { init_std_data(&input_pool); @@ -968,24 +1114,6 @@ static int rand_initialize(void) } module_init(rand_initialize); -void rand_initialize_irq(int irq) -{ - struct timer_rand_state *state; - - state = get_timer_rand_state(irq); - - if (state) - return; - - /* - * If kzalloc returns null, we just won't use that entropy - * source. 
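get_random_bytes_arch() above pulls whole unsigned longs from arch_get_random_long() for as long as the hardware keeps delivering and lets extract_entropy() cover the remainder. Below is a hedged userspace sketch of just that chunk-and-fall-back loop; the "hardware" helper here is a stub that dries up after two words and the fallback is a placeholder, not the real extractor.

/* Fill a buffer word by word from a (stubbed) HW source, then fall back. */
#include <stdio.h>
#include <string.h>

static int arch_get_random_long(unsigned long *v)   /* stub, not the real one */
{
    static int words_left = 2;          /* pretend the HW RNG dries up */

    if (words_left-- <= 0)
        return 0;
    *v = 0x0123456789abcdefUL;          /* stand-in for a HW random word */
    return 1;
}

static void software_fallback(unsigned char *p, int nbytes)
{
    memset(p, 0xaa, nbytes);            /* stands in for extract_entropy() */
}

static void get_random_bytes_demo(void *buf, int nbytes)
{
    unsigned char *p = buf;

    while (nbytes) {
        unsigned long v;
        int chunk = nbytes < (int)sizeof(unsigned long) ?
                    nbytes : (int)sizeof(unsigned long);

        if (!arch_get_random_long(&v))
            break;                      /* hardware stopped delivering */
        memcpy(p, &v, chunk);
        p += chunk;
        nbytes -= chunk;
    }
    if (nbytes)
        software_fallback(p, nbytes);   /* finish the tail in software */
}

int main(void)
{
    unsigned char out[20];
    size_t i;

    get_random_bytes_demo(out, sizeof(out));
    for (i = 0; i < sizeof(out); i++)
        printf("%02x", out[i]);
    printf("\n");
    return 0;
}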
- */ - state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); - if (state) - set_timer_rand_state(irq, state); -} - #ifdef CONFIG_BLOCK void rand_initialize_disk(struct gendisk *disk) { @@ -1093,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) count -= bytes; p += bytes; - mix_pool_bytes(r, buf, bytes); + mix_pool_bytes(r, buf, bytes, NULL); cond_resched(); } @@ -1236,10 +1364,15 @@ static int proc_do_uuid(ctl_table *table, int write, uuid = table->data; if (!uuid) { uuid = tmp_uuid; - uuid[8] = 0; - } - if (uuid[8] == 0) generate_random_uuid(uuid); + } else { + static DEFINE_SPINLOCK(bootid_spinlock); + + spin_lock(&bootid_spinlock); + if (!uuid[8]) + generate_random_uuid(uuid); + spin_unlock(&bootid_spinlock); + } sprintf(buf, "%pU", uuid); @@ -1318,9 +1451,14 @@ late_initcall(random_int_secret_init); DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); unsigned int get_random_int(void) { - __u32 *hash = get_cpu_var(get_random_int_hash); + __u32 *hash; unsigned int ret; + if (arch_get_random_int(&ret)) + return ret; + + hash = get_cpu_var(get_random_int_hash); + hash[0] += current->pid + jiffies + get_cycles(); md5_transform(hash, random_int_secret); ret = hash[0]; diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index b85ee76e4b4..65b9d6f9943 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -1019,17 +1019,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct tpm_chip *chip = file->private_data; - size_t in_size = size, out_size; + size_t in_size = size; + ssize_t out_size; /* cannot perform a write until the read has cleared - either via tpm_read or a user_read_timer timeout */ - while (atomic_read(&chip->data_pending) != 0) - msleep(TPM_TIMEOUT); - - mutex_lock(&chip->buffer_mutex); + either via tpm_read or a user_read_timer timeout. + This also prevents splitted buffered writes from blocking here. 
+ */ + if (atomic_read(&chip->data_pending) != 0) + return -EBUSY; if (in_size > TPM_BUFSIZE) - in_size = TPM_BUFSIZE; + return -E2BIG; + + mutex_lock(&chip->buffer_mutex); if (copy_from_user (chip->data_buffer, (void __user *) buf, in_size)) { @@ -1039,6 +1042,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf, /* atomic tpm command send and result receive */ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); + if (out_size < 0) { + mutex_unlock(&chip->buffer_mutex); + return out_size; + } atomic_set(&chip->data_pending, out_size); mutex_unlock(&chip->buffer_mutex); diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c index a1f68af4ccf..acce1a7aac7 100644 --- a/drivers/char/ttyprintk.c +++ b/drivers/char/ttyprintk.c @@ -66,7 +66,7 @@ static int tpk_printk(const unsigned char *buf, int count) tmp[tpk_curr + 1] = '\0'; printk(KERN_INFO "%s%s", tpk_tag, tmp); tpk_curr = 0; - if (buf[i + 1] == '\n') + if ((i + 1) < count && buf[i + 1] == '\n') i++; break; case '\n': diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index fb68b129537..a95256a3a49 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1750,7 +1750,8 @@ static void virtcons_remove(struct virtio_device *vdev) /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ - cancel_work_sync(&portdev->control_work); + if (use_multiport(portdev)) + cancel_work_sync(&portdev->control_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index faf7c521784..df41080bfcc 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -329,6 +329,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, cpufreq_update_policy(cpu); break; case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: cpufreq_stats_free_sysfs(cpu); break; case CPU_DEAD: diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index bce576d7478..f6cd315ad94 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -32,7 +32,6 @@ #include #include #include -#include /* for current / set_cpus_allowed() */ #include #include @@ -54,6 +53,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); static int cpu_family = CPU_OPTERON; +/* array to map SW pstate number to acpi state */ +static u32 ps_to_as[8]; + /* core performance boost */ static bool cpb_capable, cpb_enabled; static struct msr __percpu *msrs; @@ -80,9 +82,9 @@ static u32 find_khz_freq_from_fid(u32 fid) } static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, - u32 pstate) + u32 pstate) { - return data[pstate].frequency; + return data[ps_to_as[pstate]].frequency; } /* Return the vco fid for an input fid @@ -926,23 +928,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, invalidate_entry(powernow_table, i); continue; } - rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); - if (!(hi & HW_PSTATE_VALID_MASK)) { - pr_debug("invalid pstate %d, ignoring\n", index); - invalidate_entry(powernow_table, i); - continue; - } - powernow_table[i].index = index; + ps_to_as[index] = i; /* Frequency may be rounded for these */ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) || boot_cpu_data.x86 == 0x11) { + + rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); + if (!(hi & HW_PSTATE_VALID_MASK)) { + pr_debug("invalid pstate %d, ignoring\n", index); + 
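The ttyprintk change above only looks at buf[i + 1] after confirming that i + 1 is still below count, so a carriage return at the very end of the caller's buffer no longer triggers a one-byte overread. A minimal sketch of that bounded lookahead, applied to a simplified line splitter rather than the ttyprintk code itself (the sample buffer is made up):

#include <stdio.h>

/* Print each CR- or CRLF-terminated chunk of buf[0..count) on its own line. */
static void print_lines(const char *buf, int count)
{
        int i, start = 0;

        for (i = 0; i < count; i++) {
                if (buf[i] != '\r' && buf[i] != '\n')
                        continue;
                printf("[%.*s]\n", i - start, buf + start);
                /* Consume an LF that follows a CR, but only if it exists. */
                if (buf[i] == '\r' && (i + 1) < count && buf[i + 1] == '\n')
                        i++;
                start = i + 1;
        }
        if (start < count)
                printf("[%.*s]\n", count - start, buf + start);
}

int main(void)
{
        const char msg[] = "one\r\ntwo\rthree\r";       /* ends in a bare CR */

        print_lines(msg, (int)(sizeof(msg) - 1));
        return 0;
}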
invalidate_entry(powernow_table, i); + continue; + } + powernow_table[i].frequency = freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); } else powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + + powernow_table[i].index = index; } return 0; } @@ -1125,16 +1131,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, return res; } -/* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +struct powernowk8_target_arg { + struct cpufreq_policy *pol; + unsigned targfreq; + unsigned relation; +}; + +static long powernowk8_target_fn(void *arg) { - cpumask_var_t oldmask; + struct powernowk8_target_arg *pta = arg; + struct cpufreq_policy *pol = pta->pol; + unsigned targfreq = pta->targfreq; + unsigned relation = pta->relation; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; unsigned int newstate; - int ret = -EIO; + int ret; if (!data) return -EINVAL; @@ -1142,29 +1155,16 @@ static int powernowk8_target(struct cpufreq_policy *pol, checkfid = data->currfid; checkvid = data->currvid; - /* only run on specific CPU from here on. */ - /* This is poor form: use a workqueue or smp_call_function_single */ - if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) - return -ENOMEM; - - cpumask_copy(oldmask, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; - } - if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing targ, change pending bit set\n"); - goto err_out; + return -EIO; } pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) - goto err_out; + return -EIO; if (cpu_family != CPU_HW_PSTATE) { pr_debug("targ: curr fid 0x%x, vid 0x%x\n", @@ -1182,35 +1182,41 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) - goto err_out; + return -EIO; mutex_lock(&fidvid_mutex); powernow_k8_acpi_pst_values(data, newstate); if (cpu_family == CPU_HW_PSTATE) - ret = transition_frequency_pstate(data, newstate); + ret = transition_frequency_pstate(data, + data->powernow_table[newstate].index); else ret = transition_frequency_fidvid(data, newstate); if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); - ret = 1; mutex_unlock(&fidvid_mutex); - goto err_out; + return 1; } mutex_unlock(&fidvid_mutex); if (cpu_family == CPU_HW_PSTATE) pol->cur = find_khz_freq_from_pstate(data->powernow_table, - newstate); + data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); - ret = 0; -err_out: - set_cpus_allowed_ptr(current, oldmask); - free_cpumask_var(oldmask); - return ret; + return 0; +} + +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) +{ + struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, + .relation = relation }; + + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 8088e4443c5..e0145961303 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -173,6 
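The powernow-k8 hunks above stop assuming that a hardware P-state number equals its row in the cpufreq frequency table: ps_to_as[] records which row each P-state ended up in, and find_khz_freq_from_pstate() dereferences through that map. A minimal sketch of the same index indirection, with made-up frequencies and an arbitrary mapping:

#include <stdio.h>

/* cpufreq-style table rows (frequency in kHz), indexed by table position. */
static unsigned int table_khz[] = { 2600000, 2100000, 1400000, 800000 };

/*
 * Map hardware P-state number -> table index.  Filled while the table is
 * parsed; the values here are made up to show the numbers need not match.
 */
static int ps_to_idx[] = { 3, 2, 1, 0 };

static unsigned int khz_from_pstate(int pstate)
{
        return table_khz[ps_to_idx[pstate]];
}

int main(void)
{
        for (int ps = 0; ps < 4; ps++)
                printf("pstate %d -> %u kHz\n", ps, khz_from_pstate(ps));
        return 0;
}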
+173,7 @@ config CRYPTO_DEV_MV_CESA select CRYPTO_ALGAPI select CRYPTO_AES select CRYPTO_BLKCIPHER2 + select CRYPTO_HASH help This driver allows you to utilize the Cryptographic Engines and Security Accelerator (CESA) which can be found on the Marvell Orion diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 38a3297ae2b..f53dd83438b 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -713,6 +713,7 @@ static int mv_hash_final(struct ahash_request *req) { struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); + ahash_request_set_crypt(req, NULL, req->result, 0); mv_update_hash_req_ctx(ctx, 1, 0); return mv_handle_req(&req->base); } diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 4abd089a094..605fd20b1e5 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -409,6 +409,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev) spin_lock_irqsave(&dca_lock, flags); + if (list_empty(&dca_domains)) { + spin_unlock_irqrestore(&dca_lock, flags); + return; + } + list_del(&dca->node); pci_rc = dca_pci_rc_from_dev(dev); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 25cf327cd1c..6a718b77a2b 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -200,18 +200,17 @@ config PL330_DMA platform_data for a dma-pl330 device. config PCH_DMA - tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support" + tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223/ML7831) DMA support" depends on PCI && X86 select DMA_ENGINE help Enable support for Intel EG20T PCH DMA engine. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7213 and ML7223. - ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is - for MP(Media Phone) use. - ML7213/ML7223 is companion chip for Intel Atom E6xx series. - ML7213/ML7223 is completely compatible for Intel EG20T PCH. + Output Hub), ML7213, ML7223 and ML7831. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is + for MP(Media Phone) use and ML7831 IOH is for general purpose use. + ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. 
config IMX_SDMA tristate "i.MX SDMA support" diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 36144f88d71..1357c3b1e3a 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -237,10 +237,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) vdbg_dump_regs(atchan); - /* clear any pending interrupt */ - while (dma_readl(atdma, EBCISR)) - cpu_relax(); - channel_writel(atchan, SADDR, 0); channel_writel(atchan, DADDR, 0); channel_writel(atchan, CTRLA, 0); @@ -678,7 +674,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); if (unlikely(!atslave || !sg_len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); return NULL; } @@ -706,6 +702,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -740,6 +741,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -773,6 +779,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); +err: atc_desc_put(atchan, first); return NULL; } @@ -1279,7 +1286,7 @@ static int __init at_dma_probe(struct platform_device *pdev) tasklet_init(&atchan->tasklet, atc_tasklet, (unsigned long)atchan); - atc_enable_irq(atchan); + atc_enable_chan_irq(atdma, i); } /* set base routines */ @@ -1348,7 +1355,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) struct at_dma_chan *atchan = to_at_dma_chan(chan); /* Disable interrupts */ - atc_disable_irq(atchan); + atc_disable_chan_irq(atdma, chan->chan_id); tasklet_disable(&atchan->tasklet); tasklet_kill(&atchan->tasklet); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 087dbf1dd39..19ed47056da 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -319,28 +319,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) } -static void atc_setup_irq(struct at_dma_chan *atchan, int on) +static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on) { - struct at_dma *atdma = to_at_dma(atchan->chan_common.device); - u32 ebci; + u32 ebci; /* enable interrupts on buffer transfer completion & error */ - ebci = AT_DMA_BTC(atchan->chan_common.chan_id) - | AT_DMA_ERR(atchan->chan_common.chan_id); + ebci = AT_DMA_BTC(chan_id) + | AT_DMA_ERR(chan_id); if (on) dma_writel(atdma, EBCIER, ebci); else dma_writel(atdma, EBCIDR, ebci); } -static inline void atc_enable_irq(struct at_dma_chan *atchan) +static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id) { - atc_setup_irq(atchan, 1); + atc_setup_irq(atdma, chan_id, 1); } -static inline void atc_disable_irq(struct at_dma_chan *atchan) +static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id) { - atc_setup_irq(atchan, 0); + atc_setup_irq(atdma, chan_id, 0); } diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d845dc4b710..6e339269111 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -949,7 +949,7 @@ 
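The at_hdmac changes above reject any scatterlist entry whose length is zero before a descriptor is built for it, instead of handing the controller an empty transfer. A minimal sketch of that validate-before-build loop over (address, length) segments; the struct and the segment list are illustrative, not the kernel scatterlist API:

#include <stdio.h>
#include <stdint.h>

struct seg {
        uint64_t addr;
        uint32_t len;
};

/* Return 0 if every segment is usable, -1 on the first zero-length entry. */
static int build_descriptors(const struct seg *sg, int nents)
{
        for (int i = 0; i < nents; i++) {
                if (sg[i].len == 0) {
                        fprintf(stderr, "sg(%d): data length is zero\n", i);
                        return -1;      /* caller tears down what was built */
                }
                printf("desc %d: addr=0x%llx len=%u\n",
                       i, (unsigned long long)sg[i].addr, sg[i].len);
        }
        return 0;
}

int main(void)
{
        struct seg sgl[] = {
                { 0x1000, 512 },
                { 0x2000, 0 },          /* bogus entry, should be rejected */
                { 0x3000, 256 },
        };

        return build_descriptors(sgl, 3) ? 1 : 0;
}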
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) goto free_resources; } } - dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); /* skip validate if the capability is not present */ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45..e6f128a2336 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -45,7 +45,8 @@ #define DMA_STATUS_MASK_BITS 0x3 #define DMA_STATUS_SHIFT_BITS 16 #define DMA_STATUS_IRQ(x) (0x1 << (x)) -#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS2_ERR(x) (0x1 << (x)) #define DMA_DESC_WIDTH_SHIFT_BITS 12 #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) @@ -59,7 +60,10 @@ #define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 #define DMA_DESC_FOLLOW_WITH_IRQ 0x3 -#define MAX_CHAN_NR 8 +#define MAX_CHAN_NR 12 + +#define DMA_MASK_CTL0_MODE 0x33333333 +#define DMA_MASK_CTL2_MODE 0x00003333 static unsigned int init_nr_desc_per_channel = 64; module_param(init_nr_desc_per_channel, uint, 0644); @@ -133,6 +137,7 @@ struct pch_dma { #define PCH_DMA_CTL3 0x0C #define PCH_DMA_STS0 0x10 #define PCH_DMA_STS1 0x14 +#define PCH_DMA_STS2 0x18 #define dma_readl(pd, name) \ readl((pd)->membase + PCH_DMA_##name) @@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) { struct pch_dma *pd = to_pd(chan->device); u32 val; + int pos; + + if (chan->chan_id < 8) + pos = chan->chan_id; + else + pos = chan->chan_id + 8; val = dma_readl(pd, CTL2); if (enable) - val |= 0x1 << chan->chan_id; + val |= 0x1 << pos; else - val &= ~(0x1 << chan->chan_id); + val &= ~(0x1 << pos); dma_writel(pd, CTL2, val); @@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan) struct pch_dma_chan *pd_chan = to_pd_chan(chan); struct pch_dma *pd = to_pd(chan->device); u32 val; + u32 mask_mode; + u32 mask_ctl; if (chan->chan_id < 8) { val = dma_readl(pd, CTL0); + mask_mode = DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id); + mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val &= mask_mode; if (pd_chan->dir == DMA_TO_DEVICE) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + DMA_CTL0_DIR_SHIFT_BITS); @@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan) val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + DMA_CTL0_DIR_SHIFT_BITS)); + val |= mask_ctl; dma_writel(pd, CTL0, val); } else { int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... 
ch11->3 */ val = dma_readl(pd, CTL3); + mask_mode = DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch); + mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch)); + val &= mask_mode; if (pd_chan->dir == DMA_TO_DEVICE) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS); else val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS)); - + val |= mask_ctl; dma_writel(pd, CTL3, val); } @@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) { struct pch_dma *pd = to_pd(chan->device); u32 val; + u32 mask_ctl; + u32 mask_dir; if (chan->chan_id < 8) { + mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ + DMA_CTL0_DIR_SHIFT_BITS); val = dma_readl(pd, CTL0); - - val &= ~(DMA_CTL0_MODE_MASK_BITS << - (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val &= mask_dir; val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); - + val |= mask_ctl; dma_writel(pd, CTL0, val); } else { int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ - + mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch)); + mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\ + DMA_CTL0_DIR_SHIFT_BITS); val = dma_readl(pd, CTL3); - - val &= ~(DMA_CTL0_MODE_MASK_BITS << - (DMA_CTL0_BITS_PER_CH * ch)); + val &= mask_dir; val |= mode << (DMA_CTL0_BITS_PER_CH * ch); - + val |= mask_ctl; dma_writel(pd, CTL3, val); - } dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", chan->chan_id, val); } -static u32 pdc_get_status(struct pch_dma_chan *pd_chan) +static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) { struct pch_dma *pd = to_pd(pd_chan->chan.device); u32 val; @@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); } +static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) +{ + struct pch_dma *pd = to_pd(pd_chan->chan.device); + u32 val; + + val = dma_readl(pd, STS2); + return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + + DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); +} + static bool pdc_is_idle(struct pch_dma_chan *pd_chan) { - if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) + u32 sts; + + if (pd_chan->chan.chan_id < 8) + sts = pdc_get_status0(pd_chan); + else + sts = pdc_get_status2(pd_chan); + + + if (sts == DMA_STATUS_IDLE) return true; else return false; @@ -443,7 +489,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); if (ret) { spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; @@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) list_add_tail(&desc->desc_node, &tmp_list); } - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); list_splice(&tmp_list, &pd_chan->free_list); pd_chan->descs_allocated = i; pd_chan->completed_cookie = chan->cookie = 1; - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); pdc_enable_irq(chan, 1); @@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan) BUG_ON(!list_empty(&pd_chan->active_list)); BUG_ON(!list_empty(&pd_chan->queue)); - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); list_splice_init(&pd_chan->free_list, &tmp_list); pd_chan->descs_allocated = 0; - 
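pdc_get_status0() and pdc_get_status2() above both extract a two-bit per-channel status field from a packed register; only the register and the channel offset differ. A minimal sketch of that extraction, assuming two status bits per channel (DMA_STATUS_BITS_PER_CH is not shown in this hunk) and using a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define STATUS_MASK             0x3     /* 2-bit status per channel */
#define STATUS_SHIFT            16      /* status fields start at bit 16 */
#define STATUS_BITS_PER_CH      2       /* assumed field width per channel */

/* Extract the status field of one channel from a packed status register. */
static unsigned int chan_status(uint32_t reg, int ch)
{
        return STATUS_MASK & (reg >> (STATUS_SHIFT + STATUS_BITS_PER_CH * ch));
}

int main(void)
{
        uint32_t sts0 = 0x002D0000;     /* made-up: ch0=1, ch1=3, ch2=2, ch3=0 */

        for (int ch = 0; ch < 4; ch++)
                printf("channel %d status: %u\n", ch, chan_status(sts0, ch));
        return 0;
}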
spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) pci_pool_free(pd->pool, desc, desc->txd.phys); @@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t last_completed; int ret; - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); last_completed = pd_chan->completed_cookie; last_used = chan->cookie; - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); ret = dma_async_is_complete(cookie, last_completed, last_used); @@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, if (cmd != DMA_TERMINATE_ALL) return -ENXIO; - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); @@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, list_for_each_entry_safe(desc, _d, &list, desc_node) pdc_chain_complete(pd_chan, desc); - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); return 0; } @@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid) struct pch_dma *pd = (struct pch_dma *)devid; struct pch_dma_chan *pd_chan; u32 sts0; + u32 sts2; int i; - int ret = IRQ_NONE; + int ret0 = IRQ_NONE; + int ret2 = IRQ_NONE; sts0 = dma_readl(pd, STS0); + sts2 = dma_readl(pd, STS2); dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); for (i = 0; i < pd->dma.chancnt; i++) { pd_chan = &pd->channels[i]; - if (sts0 & DMA_STATUS_IRQ(i)) { - if (sts0 & DMA_STATUS_ERR(i)) - set_bit(0, &pd_chan->err_status); + if (i < 8) { + if (sts0 & DMA_STATUS_IRQ(i)) { + if (sts0 & DMA_STATUS0_ERR(i)) + set_bit(0, &pd_chan->err_status); - tasklet_schedule(&pd_chan->tasklet); - ret = IRQ_HANDLED; - } + tasklet_schedule(&pd_chan->tasklet); + ret0 = IRQ_HANDLED; + } + } else { + if (sts2 & DMA_STATUS_IRQ(i - 8)) { + if (sts2 & DMA_STATUS2_ERR(i)) + set_bit(0, &pd_chan->err_status); + tasklet_schedule(&pd_chan->tasklet); + ret2 = IRQ_HANDLED; + } + } } /* clear interrupt bits in status register */ - dma_writel(pd, STS0, sts0); + if (ret0) + dma_writel(pd, STS0, sts0); + if (ret2) + dma_writel(pd, STS2, sts2); - return ret; + return ret0 | ret2; } #ifdef CONFIG_PM @@ -960,6 +1021,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B +#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 +#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, @@ -972,6 +1035,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */ + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */ { 0, }, }; @@ -999,7 +1064,7 @@ static void __exit pch_dma_exit(void) module_init(pch_dma_init); module_exit(pch_dma_exit); -MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " - "DMA controller driver"); +MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICON ML7213/ML7223/ML7831 IOH" + "DMA controller driver"); MODULE_AUTHOR("Yong Wang "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 
9a8bebcf6b1..feb2d10bba6 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -161,8 +161,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) * memory controller and apply to register. Search for the first * bandwidth entry that is greater or equal than the setting requested * and program that. If at last entry, turn off DRAM scrubbing. + * + * If no suitable bandwidth is found, turn off DRAM scrubbing entirely + * by falling back to the last element in scrubrates[]. */ - for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { + for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { /* * skip scrub rates which aren't recommended * (see F10 BKDG, F3x58) @@ -172,12 +175,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) if (scrubrates[i].bandwidth <= new_bw) break; - - /* - * if no suitable bandwidth found, turn off DRAM scrubbing - * entirely by falling back to the last element in the - * scrubrates array. - */ } scrubval = scrubrates[i].scrubval; diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 495198ad059..8cc8676fa21 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -257,7 +257,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj, struct edac_pci_dev_attribute *edac_pci_dev; edac_pci_dev = (struct edac_pci_dev_attribute *)attr; - if (edac_pci_dev->show) + if (edac_pci_dev->store) return edac_pci_dev->store(edac_pci_dev->value, buffer, count); return -EIO; } diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index f6cf448d69b..240966b4c7e 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1842,11 +1842,9 @@ static int i7core_mce_check_error(void *priv, struct mce *mce) if (mce->bank != 8) return 0; -#ifdef CONFIG_SMP /* Only handle if it is the right mc controller */ if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket) return 0; -#endif smp_rmb(); if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6ad3bb6c1a..b97d4f00bb5 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -216,15 +216,33 @@ struct inbound_phy_packet_event { struct fw_cdev_event_phy_packet phy_packet; }; -static inline void __user *u64_to_uptr(__u64 value) +#ifdef CONFIG_COMPAT +static void __user *u64_to_uptr(u64 value) +{ + if (is_compat_task()) + return compat_ptr(value); + else + return (void __user *)(unsigned long)value; +} + +static u64 uptr_to_u64(void __user *ptr) +{ + if (is_compat_task()) + return ptr_to_compat(ptr); + else + return (u64)(unsigned long)ptr; +} +#else +static inline void __user *u64_to_uptr(u64 value) { return (void __user *)(unsigned long)value; } -static inline __u64 uptr_to_u64(void __user *ptr) +static inline u64 uptr_to_u64(void __user *ptr) { - return (__u64)(unsigned long)ptr; + return (u64)(unsigned long)ptr; } +#endif /* CONFIG_COMPAT */ static int fw_device_op_open(struct inode *inode, struct file *file) { @@ -453,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) client->bus_reset_closure = a->bus_reset_closure; if (a->bus_reset != 0) { fill_bus_reset_event(&bus_reset, client); - ret = copy_to_user(u64_to_uptr(a->bus_reset), - &bus_reset, sizeof(bus_reset)); + /* unaligned size of bus_reset is 36 bytes */ + ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36); } if (ret == 0 && list_empty(&client->link)) list_add_tail(&client->link, 
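The amd64_edac loop above now stops one entry short of the end of scrubrates[], so when no entry has a bandwidth at or below the request the index simply lands on the last element, which disables scrubbing. A minimal sketch of that first-match-or-last-entry lookup with made-up table values:

#include <stdio.h>

struct scrubrate {
        unsigned int bandwidth;         /* bytes/s, 0 means scrubbing off */
        unsigned int scrubval;          /* register encoding */
};

static const struct scrubrate scrubrates[] = {
        { 1600000, 0x01 },
        {  800000, 0x02 },
        {  400000, 0x03 },
        {       0, 0x00 },              /* last entry: scrubbing disabled */
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static unsigned int pick_scrubval(unsigned int new_bw)
{
        size_t i;

        /* Stop one short of the end: falling through selects "disabled". */
        for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
                if (scrubrates[i].bandwidth <= new_bw)
                        break;
        return scrubrates[i].scrubval;
}

int main(void)
{
        printf("1 MB/s  -> 0x%02x\n", pick_scrubval(1000000));
        printf("100 B/s -> 0x%02x\n", pick_scrubval(100));      /* no match: off */
        return 0;
}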
&client->device->client_list); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 95a47140189..812cea37a5b 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = { static int read_rom(struct fw_device *device, int generation, int index, u32 *data) { - int rcode; + u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; + int i, rcode; /* device->node_id, accessed below, must not be older than generation */ smp_rmb(); - rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, - device->node_id, generation, device->max_speed, - (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, - data, 4); + for (i = 10; i < 100; i += 10) { + rcode = fw_run_transaction(device->card, + TCODE_READ_QUADLET_REQUEST, device->node_id, + generation, device->max_speed, offset, data, 4); + if (rcode != RCODE_BUSY) + break; + msleep(i); + } be32_to_cpus(data); return rcode; @@ -990,6 +995,10 @@ static void fw_device_init(struct work_struct *work) ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? idr_get_new(&fw_device_idr, device, &minor) : -ENOMEM; + if (minor >= 1 << MINORBITS) { + idr_remove(&fw_device_idr, minor); + minor = -ENOSPC; + } up_write(&fw_device_rwsem); if (ret < 0) diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index b9762d07198..e74750bc0b7 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -863,8 +863,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) { buf_ptr += 2; length -= IEEE1394_GASP_HDR_SIZE; - fwnet_incoming_packet(dev, buf_ptr, length, - source_node_id, -1, true); + fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + context->card->generation, true); } packet.payload_length = dev->rcv_buffer_size; @@ -959,7 +959,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) break; } - skb_pull(skb, ptask->max_payload); + if (ptask->dest_node == IEEE1394_ALL_NODES) { + skb_pull(skb, + ptask->max_payload + IEEE1394_GASP_HDR_SIZE); + } else { + skb_pull(skb, ptask->max_payload); + } if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, dg_size, fg_off, datagram_label); @@ -1062,7 +1067,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) smp_rmb(); node_id = dev->card->node_id; - p = skb_push(ptask->skb, 8); + p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p); put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | RFC2734_SW_VERSION, &p[4]); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index ee76c8ec72f..271fc518dec 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -262,6 +262,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) static char ohci_driver_name[] = KBUILD_MODNAME; #define PCI_DEVICE_ID_AGERE_FW643 0x5901 +#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd @@ -285,6 +286,9 @@ static const struct { {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, QUIRK_NO_MSI}, + {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, + QUIRK_RESET_PACKET}, + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, QUIRK_NO_MSI}, @@ -295,7 +299,7 @@ static const struct { QUIRK_NO_MSI}, 
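read_rom() above retries a config-ROM quadlet read while the node answers busy, sleeping 10 ms, then 20 ms, and so on up to 90 ms before giving up. A minimal user-space sketch of that growing-backoff retry loop; the stubbed transaction below is not the firewire API and simply succeeds after a few busy replies:

#include <stdio.h>
#include <unistd.h>

enum { RCODE_COMPLETE, RCODE_BUSY };

/* Stub: report "busy" for the first few attempts, then succeed. */
static int run_transaction(void)
{
        static int calls;

        return ++calls < 4 ? RCODE_BUSY : RCODE_COMPLETE;
}

int main(void)
{
        int i, rcode = RCODE_BUSY;

        for (i = 10; i < 100; i += 10) {
                rcode = run_transaction();
                if (rcode != RCODE_BUSY)
                        break;
                usleep(i * 1000);       /* back off 10, 20, ... 90 ms */
        }
        printf("result: %s\n",
               rcode == RCODE_COMPLETE ? "complete" : "still busy");
        return 0;
}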
{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, - QUIRK_CYCLE_TIMER}, + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, @@ -2554,15 +2558,14 @@ static int handle_ir_buffer_fill(struct context *context, struct iso_context *ctx = container_of(context, struct iso_context, context); - if (!last->transfer_status) + if (last->res_count != 0) /* Descriptor(s) not done yet, stop iteration */ return 0; if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) ctx->base.callback.mc(&ctx->base, le32_to_cpu(last->data_address) + - le16_to_cpu(last->req_count) - - le16_to_cpu(last->res_count), + le16_to_cpu(last->req_count), ctx->base.callback_data); return 1; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index bcb1126e3d0..03ab4e8d3e7 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -6,6 +6,7 @@ #include #include #include +#include #include /* @@ -15,6 +16,7 @@ */ static char dmi_empty_string[] = " "; +static u16 __initdata dmi_ver; /* * Catch too early calls to dmi_check_system(): */ @@ -111,16 +113,18 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, dmi_table(buf, dmi_len, dmi_num, decode, NULL); + add_device_randomness(buf, dmi_len); + dmi_iounmap(buf, dmi_len); return 0; } -static int __init dmi_checksum(const u8 *buf) +static int __init dmi_checksum(const u8 *buf, u8 len) { u8 sum = 0; int a; - for (a = 0; a < 15; a++) + for (a = 0; a < len; a++) sum += buf[a]; return sum == 0; @@ -158,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde return; for (i = 0; i < 16 && (is_ff || is_00); i++) { - if(d[i] != 0x00) is_ff = 0; - if(d[i] != 0xFF) is_00 = 0; + if (d[i] != 0x00) + is_00 = 0; + if (d[i] != 0xFF) + is_ff = 0; } if (is_ff || is_00) @@ -169,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde if (!s) return; - sprintf(s, "%pUB", d); + /* + * As of version 2.6 of the SMBIOS specification, the first 3 fields of + * the UUID are supposed to be little-endian encoded. The specification + * says that this is the defacto standard. + */ + if (dmi_ver >= 0x0206) + sprintf(s, "%pUL", d); + else + sprintf(s, "%pUB", d); dmi_ident[slot] = s; } @@ -401,26 +415,53 @@ static int __init dmi_present(const char __iomem *p) u8 buf[15]; memcpy_fromio(buf, p, 15); - if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { + if (dmi_checksum(buf, 15)) { dmi_num = (buf[13] << 8) | buf[12]; dmi_len = (buf[7] << 8) | buf[6]; dmi_base = (buf[11] << 24) | (buf[10] << 16) | (buf[9] << 8) | buf[8]; - /* - * DMI version 0.0 means that the real version is taken from - * the SMBIOS version, which we don't know at this point. 
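dmi_checksum() above now sums a caller-supplied number of bytes instead of a hard-coded 15, because the SMBIOS entry point declares its own length; a structure is valid when its bytes sum to zero modulo 256. A minimal sketch of that check with a made-up buffer whose last byte is fixed up so the sum wraps to zero:

#include <stdio.h>
#include <stdint.h>

/* Return 1 if the first len bytes sum to 0 (mod 256), as SMBIOS requires. */
static int checksum_ok(const uint8_t *buf, uint8_t len)
{
        uint8_t sum = 0;

        for (uint8_t a = 0; a < len; a++)
                sum += buf[a];
        return sum == 0;
}

int main(void)
{
        uint8_t entry[5] = { '_', 'S', 'M', '_', 0 };
        uint8_t sum = 0;

        /* Fix up the final byte so the whole structure sums to zero. */
        for (int i = 0; i < 4; i++)
                sum += entry[i];
        entry[4] = (uint8_t)(0x100 - sum);

        printf("checksum %s\n", checksum_ok(entry, 5) ? "ok" : "bad");
        return 0;
}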
- */ - if (buf[14] != 0) - printk(KERN_INFO "DMI %d.%d present.\n", - buf[14] >> 4, buf[14] & 0xF); - else - printk(KERN_INFO "DMI present.\n"); if (dmi_walk_early(dmi_decode) == 0) { + if (dmi_ver) + pr_info("SMBIOS %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + else { + dmi_ver = (buf[14] & 0xF0) << 4 | + (buf[14] & 0x0F); + pr_info("Legacy DMI %d.%d present.\n", + dmi_ver >> 8, dmi_ver & 0xFF); + } dmi_dump_ids(); return 0; } } + dmi_ver = 0; + return 1; +} + +static int __init smbios_present(const char __iomem *p) +{ + u8 buf[32]; + + memcpy_fromio(buf, p, 32); + if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) { + dmi_ver = (buf[6] << 8) + buf[7]; + + /* Some BIOS report weird SMBIOS version, fix that up */ + switch (dmi_ver) { + case 0x021F: + case 0x0221: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", + dmi_ver & 0xFF, 3); + dmi_ver = 0x0203; + break; + case 0x0233: + pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6); + dmi_ver = 0x0206; + break; + } + return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16); + } return 1; } @@ -441,7 +482,7 @@ void __init dmi_scan_machine(void) if (p == NULL) goto error; - rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ + rc = smbios_present(p); dmi_iounmap(p, 32); if (!rc) { dmi_available = 1; @@ -459,7 +500,12 @@ void __init dmi_scan_machine(void) goto error; for (q = p; q < p + 0x10000; q += 16) { - rc = dmi_present(q); + if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0) + rc = smbios_present(q); + else if (memcmp(q, "_DMI_", 5) == 0) + rc = dmi_present(q); + else + continue; if (!rc) { dmi_available = 1; dmi_iounmap(p, 0x10000); diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 5f29aafd446..c5cce9ca43e 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -119,6 +119,8 @@ struct efivar_attribute { ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); }; +static struct efivars __efivars; +static struct efivar_operations ops; #define EFIVAR_ATTR(_name, _mode, _show, _store) \ struct efivar_attribute efivar_attr_##_name = { \ @@ -141,23 +143,213 @@ efivar_create_sysfs_entry(struct efivars *efivars, /* Return the number of unicode characters in data */ static unsigned long -utf8_strlen(efi_char16_t *data, unsigned long maxlength) +utf16_strnlen(efi_char16_t *s, size_t maxlength) { unsigned long length = 0; - while (*data++ != 0 && length < maxlength) + while (*s++ != 0 && length < maxlength) length++; return length; } +static inline unsigned long +utf16_strlen(efi_char16_t *s) +{ + return utf16_strnlen(s, ~0UL); +} + /* * Return the number of bytes is the length of this string * Note: this is NOT the same as the number of unicode characters */ static inline unsigned long -utf8_strsize(efi_char16_t *data, unsigned long maxlength) +utf16_strsize(efi_char16_t *data, unsigned long maxlength) +{ + return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); +} + +static bool +validate_device_path(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + struct efi_generic_dev_path *node; + int offset = 0; + + node = (struct efi_generic_dev_path *)buffer; + + if (len < sizeof(*node)) + return false; + + while (offset <= len - sizeof(*node) && + node->length >= sizeof(*node) && + node->length <= len - offset) { + offset += node->length; + + if ((node->type == EFI_DEV_END_PATH || + node->type == EFI_DEV_END_PATH2) && + node->sub_type == EFI_DEV_END_ENTIRE) + return true; + + node = (struct efi_generic_dev_path *)(buffer + 
offset); + } + + /* + * If we're here then either node->length pointed past the end + * of the buffer or we reached the end of the buffer without + * finding a device path end node. + */ + return false; +} + +static bool +validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + /* An array of 16-bit integers */ + if ((len % 2) != 0) + return false; + + return true; +} + +static bool +validate_load_option(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + u16 filepathlength; + int i, desclength = 0, namelen; + + namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { + if (var->VariableName[i] > 127 || + hex_to_bin(var->VariableName[i] & 0xff) < 0) + return true; + } + + /* Reject it if there's 4 digits of hex and then further content */ + if (namelen > match + 4) + return false; + + /* A valid entry must be at least 8 bytes */ + if (len < 8) + return false; + + filepathlength = buffer[4] | buffer[5] << 8; + + /* + * There's no stored length for the description, so it has to be + * found by hand + */ + desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; + + /* Each boot entry must have a descriptor */ + if (!desclength) + return false; + + /* + * If the sum of the length of the description, the claimed filepath + * length and the original header are greater than the length of the + * variable, it's malformed + */ + if ((desclength + filepathlength + 6) > len) + return false; + + /* + * And, finally, check the filepath + */ + return validate_device_path(var, match, buffer + desclength + 6, + filepathlength); +} + +static bool +validate_uint16(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + /* A single 16-bit integer */ + if (len != 2) + return false; + + return true; +} + +static bool +validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + int i; + + for (i = 0; i < len; i++) { + if (buffer[i] > 127) + return false; + + if (buffer[i] == 0) + return true; + } + + return false; +} + +struct variable_validate { + char *name; + bool (*validate)(struct efi_variable *var, int match, u8 *data, + unsigned long len); +}; + +static const struct variable_validate variable_validate[] = { + { "BootNext", validate_uint16 }, + { "BootOrder", validate_boot_order }, + { "DriverOrder", validate_boot_order }, + { "Boot*", validate_load_option }, + { "Driver*", validate_load_option }, + { "ConIn", validate_device_path }, + { "ConInDev", validate_device_path }, + { "ConOut", validate_device_path }, + { "ConOutDev", validate_device_path }, + { "ErrOut", validate_device_path }, + { "ErrOutDev", validate_device_path }, + { "Timeout", validate_uint16 }, + { "Lang", validate_ascii_string }, + { "PlatformLang", validate_ascii_string }, + { "", NULL }, +}; + +static bool +validate_var(struct efi_variable *var, u8 *data, unsigned long len) { - return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); + int i; + u16 *unicode_name = var->VariableName; + + for (i = 0; variable_validate[i].validate != NULL; i++) { + const char *name = variable_validate[i].name; + int match; + + for (match = 0; ; match++) { + char c = name[match]; + u16 u = unicode_name[match]; + + /* All special variables are plain ascii */ + if (u > 127) + return true; + + /* Wildcard in the matching name means we've matched */ + if (c == '*') + return 
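validate_device_path() above walks a buffer of variable-length nodes and only trusts a node's self-declared length after checking that it is at least the header size and still fits in what remains of the buffer; the walk succeeds only if a proper end node is reached. A minimal sketch of that bounded walk over a simplified type/length record format (the layout here is illustrative, not the EFI device-path encoding):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct node_hdr {
        uint8_t type;           /* 0x7f means "end of list" in this sketch */
        uint8_t length;         /* total node length, header included */
};

#define NODE_END 0x7f

/* Return 1 only if a well-formed end node is found inside buf[0..len). */
static int validate_path(const uint8_t *buf, size_t len)
{
        size_t offset = 0;
        struct node_hdr node;

        while (len - offset >= sizeof(node)) {
                memcpy(&node, buf + offset, sizeof(node));
                if (node.length < sizeof(node) || node.length > len - offset)
                        return 0;       /* declared length escapes the buffer */
                if (node.type == NODE_END)
                        return 1;
                offset += node.length;
        }
        return 0;       /* ran out of buffer without seeing an end node */
}

int main(void)
{
        uint8_t good[] = { 0x01, 4, 0xAA, 0xBB, NODE_END, 2 };
        uint8_t bad[]  = { 0x01, 40, 0xAA, 0xBB };      /* length overruns buffer */

        printf("good: %d\n", validate_path(good, sizeof(good)));
        printf("bad:  %d\n", validate_path(bad, sizeof(bad)));
        return 0;
}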
variable_validate[i].validate(var, + match, data, len); + + /* Case sensitive match */ + if (c != u) + break; + + /* Reached the end of the string while matching */ + if (!c) + return variable_validate[i].validate(var, + match, data, len); + } + } + + return true; } static efi_status_t @@ -210,12 +402,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf) if (status != EFI_SUCCESS) return -EIO; - if (var->Attributes & 0x1) + if (var->Attributes & EFI_VARIABLE_NON_VOLATILE) str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n"); - if (var->Attributes & 0x2) + if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS) str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n"); - if (var->Attributes & 0x4) + if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS) str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n"); + if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD) + str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n"); + if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS) + str += sprintf(str, + "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n"); + if (var->Attributes & + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS) + str += sprintf(str, + "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n"); + if (var->Attributes & EFI_VARIABLE_APPEND_WRITE) + str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n"); return str - buf; } @@ -283,6 +486,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) return -EINVAL; } + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || + validate_var(new_var, new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } + spin_lock(&efivars->lock); status = efivars->ops->set_variable(new_var->VariableName, &new_var->VendorGuid, @@ -408,14 +617,20 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || + validate_var(new_var, new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } + spin_lock(&efivars->lock); /* * Does this variable already exist? */ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf8_strsize(new_var->VariableName, 1024); + strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); + strsize2 = utf16_strsize(new_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), new_var->VariableName, strsize1) && @@ -447,8 +662,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, /* Create the entry in sysfs. Locking is not required here */ status = efivar_create_sysfs_entry(efivars, - utf8_strsize(new_var->VariableName, - 1024), + utf16_strsize(new_var->VariableName, + 1024), new_var->VariableName, &new_var->VendorGuid); if (status) { @@ -477,8 +692,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * Does this variable already exist? 
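validate_var() above compares each 8-bit pattern name from variable_validate[] against the 16-bit variable name one character at a time, treating '*' as "anything from here on" and passing the wildcard position to the handler so it knows where the numeric suffix starts. A minimal sketch of that prefix-plus-wildcard lookup over plain C strings, with a few pattern names taken from the table above and the handlers reduced to labels:

#include <stdio.h>

struct pattern {
        const char *name;       /* '*' allows any suffix */
        const char *handler;    /* stand-in for the validate callback */
};

static const struct pattern patterns[] = {
        { "BootOrder", "validate_boot_order" },
        { "Boot*",     "validate_load_option" },
        { "Lang",      "validate_ascii_string" },
        { NULL,        NULL },
};

/* Return the handler for name and the match offset via *match, or NULL. */
static const char *lookup(const char *name, int *match)
{
        for (int i = 0; patterns[i].name; i++) {
                for (*match = 0; ; (*match)++) {
                        char c = patterns[i].name[*match];
                        char u = name[*match];

                        if (c == '*')
                                return patterns[i].handler;
                        if (c != u)
                                break;
                        if (!c)         /* both strings ended together */
                                return patterns[i].handler;
                }
        }
        return NULL;
}

int main(void)
{
        const char *tests[] = { "BootOrder", "Boot0001", "Lang", "MyVendorVar" };

        for (int i = 0; i < 4; i++) {
                int match = 0;
                const char *h = lookup(tests[i], &match);

                printf("%-12s -> %s (match=%d)\n", tests[i],
                       h ? h : "no special handling", match);
        }
        return 0;
}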
*/ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf8_strsize(del_var->VariableName, 1024); + strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); + strsize2 = utf16_strsize(del_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), del_var->VariableName, strsize1) && @@ -517,6 +732,53 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, return count; } +static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) +{ + struct efivar_entry *entry, *n; + struct efivars *efivars = &__efivars; + unsigned long strsize1, strsize2; + bool found = false; + + strsize1 = utf16_strsize(variable_name, 1024); + list_for_each_entry_safe(entry, n, &efivars->list, list) { + strsize2 = utf16_strsize(entry->var.VariableName, 1024); + if (strsize1 == strsize2 && + !memcmp(variable_name, &(entry->var.VariableName), + strsize2) && + !efi_guidcmp(entry->var.VendorGuid, + *vendor)) { + found = true; + break; + } + } + return found; +} + +/* + * Returns the size of variable_name, in bytes, including the + * terminating NULL character, or variable_name_size if no NULL + * character is found among the first variable_name_size bytes. + */ +static unsigned long var_name_strnsize(efi_char16_t *variable_name, + unsigned long variable_name_size) +{ + unsigned long len; + efi_char16_t c; + + /* + * The variable name is, by definition, a NULL-terminated + * string, so make absolutely sure that variable_name_size is + * the value we expect it to be. If not, return the real size. + */ + for (len = 2; len <= variable_name_size; len += sizeof(c)) { + c = variable_name[(len / sizeof(c)) - 1]; + if (!c) + break; + } + + return min(len, variable_name_size); +} + /* * Let's not leave out systab information that snuck into * the efivars driver @@ -704,6 +966,28 @@ void unregister_efivars(struct efivars *efivars) } EXPORT_SYMBOL_GPL(unregister_efivars); +/* + * Print a warning when duplicate EFI variables are encountered and + * disable the sysfs workqueue since the firmware is buggy. + */ +static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, + unsigned long len16) +{ + size_t i, len8 = len16 / sizeof(efi_char16_t); + char *s8; + + s8 = kzalloc(len8, GFP_KERNEL); + if (!s8) + return; + + for (i = 0; i < len8; i++) + s8[i] = s16[i]; + + printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", + s8, vendor_guid); + kfree(s8); +} + int register_efivars(struct efivars *efivars, const struct efivar_operations *ops, struct kobject *parent_kobj) @@ -744,6 +1028,24 @@ int register_efivars(struct efivars *efivars, &vendor_guid); switch (status) { case EFI_SUCCESS: + variable_name_size = var_name_strnsize(variable_name, + variable_name_size); + + /* + * Some firmware implementations return the + * same variable name on multiple calls to + * get_next_variable(). Terminate the loop + * immediately as there is no guarantee that + * we'll ever see a different variable name, + * and may end up looping here forever. 
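var_name_strnsize() above does not take the firmware-reported name size at face value: it scans the 16-bit characters itself and returns the size up to and including the first NUL, clamped to what the firmware claimed. A minimal sketch of that defensive size calculation with a made-up name:

#include <stdio.h>
#include <stdint.h>

/*
 * Size in bytes of a NUL-terminated UTF-16 name, including the NUL,
 * never exceeding the size the caller claimed.
 */
static unsigned long name_strnsize(const uint16_t *name, unsigned long claimed)
{
        unsigned long len;

        for (len = sizeof(uint16_t); len <= claimed; len += sizeof(uint16_t))
                if (!name[(len / sizeof(uint16_t)) - 1])
                        break;
        return len < claimed ? len : claimed;
}

int main(void)
{
        /* "Boot0001" in UTF-16, NUL-terminated: 9 characters = 18 bytes. */
        uint16_t name[] = { 'B', 'o', 'o', 't', '0', '0', '0', '1', 0 };

        /* Both print 18: the scan stops at the real NUL, not the claim. */
        printf("honest size:    %lu\n", name_strnsize(name, sizeof(name)));
        printf("inflated claim: %lu\n", name_strnsize(name, 1024));
        return 0;
}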
+ */ + if (variable_is_present(variable_name, &vendor_guid)) { + dup_variable_bug(variable_name, &vendor_guid, + variable_name_size); + status = EFI_NOT_FOUND; + break; + } + efivar_create_sysfs_entry(efivars, variable_name_size, variable_name, @@ -770,9 +1072,6 @@ int register_efivars(struct efivars *efivars, } EXPORT_SYMBOL_GPL(register_efivars); -static struct efivars __efivars; -static struct efivar_operations ops; - /* * For now we register the efi subsystem with the firmware subsystem * and the vars subsystem with the efi subsystem. In the future, it diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c index 51e0e2d8fac..a330492e06f 100644 --- a/drivers/firmware/pcdp.c +++ b/drivers/firmware/pcdp.c @@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline) if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return -ENODEV; - pcdp = ioremap(efi.hcdp, 4096); + pcdp = early_ioremap(efi.hcdp, 4096); printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); if (strstr(cmdline, "console=hcdp")) { @@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline) } out: - iounmap(pcdp); + early_iounmap(pcdp, 4096); return rc; } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 385e9c70588..e73c9784990 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -350,18 +350,19 @@ config GPIO_LANGWELL Say Y here to support Intel Langwell/Penwell GPIO. config GPIO_PCH - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO" + tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO" depends on PCI && X86 help This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded processor. This driver can access PCH GPIO device. - This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7223. + This driver also can be used for LAPIS Semiconductor IOH(Input/ + Output Hub), ML7223 and ML7831. ML7223 IOH is for MP(Media Phone) use. - ML7223 is companion chip for Intel Atom E6xx series. - ML7223 is completely compatible for Intel EG20T PCH. + ML7831 IOH is for general purpose use. + ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7223/ML7831 is completely compatible for Intel EG20T PCH. 
config GPIO_ML_IOH tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support" diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 0451d7ac94a..532f6900626 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -437,7 +437,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) do { level = __ffs(pending); - generic_handle_irq(level + chip->irq_base); + handle_nested_irq(level + chip->irq_base); pending &= ~(1 << level); } while (pending); @@ -481,8 +481,8 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq = lvl + chip->irq_base; irq_set_chip_data(irq, chip); - irq_set_chip_and_handler(irq, &pca953x_irq_chip, - handle_simple_irq); + irq_set_chip(irq, &pca953x_irq_chip); + irq_set_nested_thread(irq, true); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c index 36919e77c49..de26978b420 100644 --- a/drivers/gpio/pch_gpio.c +++ b/drivers/gpio/pch_gpio.c @@ -287,6 +287,7 @@ static int pch_gpio_resume(struct pci_dev *pdev) static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) }, + { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) }, { 0, } }; MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 3f46772f0cb..ba23790450e 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. */ -static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) +int drm_remove_magic(struct drm_master *master, drm_magic_t magic) { struct drm_magic_entry *pt; struct drm_hash_item *hash; @@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic) * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p * file_priv. + * This ioctl needs protection by the drm_global_mutex, which protects + * struct drm_file::magic and struct drm_magic_entry::priv. */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) * \return zero if authentication successed, or a negative number otherwise. * * Checks if \p file_priv is associated with the magic number passed in \arg. + * This ioctl needs protection by the drm_global_mutex, which protects + * struct drm_file::magic and struct drm_magic_entry::priv. 
*/ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 1bbb85b9ce4..b4d789858c3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -584,7 +584,7 @@ static bool drm_monitor_supports_rb(struct edid *edid) { if (edid->revision >= 4) { - bool ret; + bool ret = false; drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); return ret; } @@ -841,7 +841,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; - unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; + unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); /* ignore tiny modes */ diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 802b61ac313..a9dcdc7d372 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -610,9 +610,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, return -EINVAL; /* Need to resize the fb object !!! */ - if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) { + if (var->bits_per_pixel > fb->bits_per_pixel || + var->xres > fb->width || var->yres > fb->height || + var->xres_virtual > fb->width || var->yres_virtual > fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb " - "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, + "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", + var->xres, var->yres, var->bits_per_pixel, + var->xres_virtual, var->yres_virtual, fb->width, fb->height, fb->bits_per_pixel); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 2ec7d48fc4a..72fa601d4c4 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -135,8 +135,11 @@ int drm_open(struct inode *inode, struct file *filp) retcode = drm_open_helper(inode, filp, dev); if (!retcode) { atomic_inc(&dev->counts[_DRM_STAT_OPENS]); - if (!dev->open_count++) + if (!dev->open_count++) { retcode = drm_setup(dev); + if (retcode) + dev->open_count--; + } } if (!retcode) { mutex_lock(&dev->struct_mutex); @@ -486,6 +489,11 @@ int drm_release(struct inode *inode, struct file *filp) (long)old_encode_dev(file_priv->minor->device), dev->open_count); + /* Release any auth tokens that might point to this file_priv, + (do that under the drm_global_mutex) */ + if (file_priv->magic) + (void) drm_remove_magic(file_priv->master, file_priv->magic); + /* if the master has gone away we can't do anything with the lock */ if (file_priv->minor->master) drm_master_release(dev, filp); diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 959186cbf32..01894e4ceea 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -679,33 +679,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +static unsigned 
long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) { - struct drm_mm_node *entry; - unsigned long total_used = 0, total_free = 0, total = 0; unsigned long hole_start, hole_end, hole_size; - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); - hole_size = hole_end - hole_start; - if (hole_size) + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); - total_free += hole_size; + return hole_size; + } + + return 0; +} + +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +{ + struct drm_mm_node *entry; + unsigned long total_used = 0, total_free = 0, total = 0; + + total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_for_each_node(entry, mm) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; - if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); - hole_size = hole_end - hole_start; - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", - hole_start, hole_end, hole_size); - total_free += hole_size; - } + total_free += drm_mm_dump_hole(m, entry); } total = total_free + total_used; diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c index 206d2300d87..0c853f5c9c8 100644 --- a/drivers/gpu/drm/drm_usb.c +++ b/drivers/gpu/drm/drm_usb.c @@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface, usbdev = interface_to_usbdev(interface); dev->usbdev = usbdev; - dev->dev = &usbdev->dev; + dev->dev = &interface->dev; mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e36efdc67e1..19bab81483c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -119,7 +119,7 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", + seq_printf(m, "%pK: %s%s %8zd %04x %04x %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index eb91e2dd791..111686ada27 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -379,6 +379,10 @@ static int i915_drm_freeze(struct drm_device *dev) /* Modeset on resume, not lid events */ dev_priv->modeset_on_lid = 0; + console_lock(); + intel_fbdev_set_suspend(dev, 1); + console_unlock(); + return 0; } @@ -438,7 +442,9 @@ static int i915_drm_thaw(struct drm_device *dev) drm_irq_install(dev); /* Resume the modeset for every activated CRTC */ + mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); if (IS_IRONLAKE_M(dev)) ironlake_enable_rc6(dev); @@ -448,6 +454,9 @@ static int i915_drm_thaw(struct drm_device *dev) dev_priv->modeset_on_lid = 0; + console_lock(); + intel_fbdev_set_suspend(dev, 0); + console_unlock(); return error; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 335564e35c3..b570415c3fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -325,6 +325,8 @@ typedef struct drm_i915_private { struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; + 
uint32_t last_acthd_bsd; + uint32_t last_acthd_blt; uint32_t last_instdone; uint32_t last_instdone1; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5548593040b..46e04a10b07 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1256,6 +1256,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) case 0: case -ERESTARTSYS: case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ return VM_FAULT_NOPAGE; case -ENOMEM: return VM_FAULT_OOM; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 4934cf84c32..1ca53ffac6a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -655,6 +655,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + u64 invalid_offset = (u64)-1; + int j; user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; @@ -665,6 +667,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } + /* As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. + */ + for (j = 0; j < exec[i].relocation_count; j++) { + if (copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + } + reloc_offset[i] = total; total += exec[i].relocation_count; } @@ -867,15 +888,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count) { int i; + int relocs_total = 0; + int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; int length; /* limited by fault_in_pages_readable() */ - /* First check for malicious input causing overflow */ - if (exec[i].relocation_count > - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + /* First check for malicious input causing overflow in + * the worst case where we need to allocate the entire + * relocation tree as a single array. 
+ */ + if (exec[i].relocation_count > relocs_max - relocs_total) return -EINVAL; + relocs_total += exec[i].relocation_count; length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); @@ -1046,6 +1072,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return -EINVAL; + } cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), GFP_KERNEL); if (cliprects == NULL) { @@ -1296,7 +1327,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; - if (args->buffer_count < 1) { + if (args->buffer_count < 1 || + args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 363564704b1..d05f03c6284 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -422,14 +422,11 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_unlock(&dev_priv->dev->struct_mutex); } -static void pch_irq_handler(struct drm_device *dev) +static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 pch_iir; int pipe; - pch_iir = I915_READ(SDEIIR); - if (pch_iir & SDE_AUDIO_POWER_MASK) DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", (pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -527,7 +524,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_PCH_EVENT_IVB) { if (pch_iir & SDE_HOTPLUG_MASK_CPT) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - pch_irq_handler(dev); + pch_irq_handler(dev, pch_iir); } if (pm_iir & GEN6_PM_DEFERRED_EVENTS) { @@ -626,7 +623,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) if (de_iir & DE_PCH_EVENT) { if (pch_iir & hotplug_mask) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - pch_irq_handler(dev); + pch_irq_handler(dev, pch_iir); } if (de_iir & DE_PCU_EVENT) { @@ -1665,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd, instdone, instdone1; + uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; bool err = false; /* If all work is done then ACTHD clearly hasn't advanced. */ @@ -1679,16 +1676,21 @@ void i915_hangcheck_elapsed(unsigned long data) } if (INTEL_INFO(dev)->gen < 4) { - acthd = I915_READ(ACTHD); instdone = I915_READ(INSTDONE); instdone1 = 0; } else { - acthd = I915_READ(ACTHD_I965); instdone = I915_READ(INSTDONE_I965); instdone1 = I915_READ(INSTDONE1); } + acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]); + acthd_bsd = HAS_BSD(dev) ? + intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0; + acthd_blt = HAS_BLT(dev) ? 
+ intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0; if (dev_priv->last_acthd == acthd && + dev_priv->last_acthd_bsd == acthd_bsd && + dev_priv->last_acthd_blt == acthd_blt && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { if (dev_priv->hangcheck_count++ > 1) { @@ -1720,6 +1722,8 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->hangcheck_count = 0; dev_priv->last_acthd = acthd; + dev_priv->last_acthd_bsd = acthd_bsd; + dev_priv->last_acthd_blt = acthd_blt; dev_priv->last_instdone = instdone; dev_priv->last_instdone1 = instdone1; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 673f0d2cd4e..5dc3b6d3932 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -27,6 +27,8 @@ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. @@ -354,6 +356,7 @@ * the enables for writing to the corresponding low bit. */ #define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) #define _3D_CHICKEN2 0x0208c /* Disables pipelining of read flushes past the SF-WIZ interface. * Required on all Ironlake steppings according to the B-Spec, but the @@ -537,6 +540,21 @@ #define GEN6_BSD_RNCID 0x12198 +#define GEN7_FF_THREAD_MODE 0x20a0 +#define GEN7_FF_SCHED_MASK 0x0077070 +#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) +#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) +#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) +#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ +#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) +#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) +#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ +#define GEN7_FF_VS_SCHED_HW (0x0<<12) +#define GEN7_FF_DS_SCHED_HS1 (0x5<<4) +#define GEN7_FF_DS_SCHED_HS0 (0x3<<4) +#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ +#define GEN7_FF_DS_SCHED_HW (0x0<<4) + /* * Framebuffer compression (915+ only) */ @@ -2274,6 +2292,7 @@ #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) +#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1<<25) @@ -2738,6 +2757,8 @@ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 #define PF_ENABLE (1<<31) +#define PF_PIPE_SEL_MASK_IVB (3<<29) +#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) #define PF_FILTER_MASK (3<<23) #define PF_FILTER_PROGRAMMED (0<<23) #define PF_FILTER_MED_3x3 (1<<23) @@ -2847,6 +2868,20 @@ #define DISP_TILE_SURFACE_SWIZZLING (1<<13) #define DISP_FBC_WM_DIS (1<<15) +/* GEN7 chicken */ +#define GEN7_COMMON_SLICE_CHICKEN1 0x7010 +# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) + +#define GEN7_L3CNTLREG1 0xB01C +#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C + +#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 +#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 + +/* WaCatErrorRejectionIssue */ +#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 +#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) + /* PCH */ /* south display engine interrupt */ @@ -3083,6 +3118,11 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) +#define _TRANSA_CHICKEN2 0xf0064 +#define _TRANSB_CHICKEN2 0xf1064 +#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) +#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) + #define SOUTH_CHICKEN2 0xc2004 #define DPLS_EDP_PPS_FIX_DIS (1<<0) @@ 
-3371,6 +3411,7 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GEN6_UCGCTL2 0x9404 +# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13) # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12) # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index cf15533aabf..5ad0b5124b7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -34,6 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) struct drm_i915_private *dev_priv = dev->dev_private; u32 dpll_reg; + /* On IVB, 3rd pipe shares PLL with another one */ + if (pipe > 1) + return false; + if (HAS_PCH_SPLIT(dev)) dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B; else @@ -735,8 +739,11 @@ static void i915_restore_display(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); + /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; + * otherwise we get blank eDP screen after S3 on some machines + */ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); + I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 927442a1192..e5fa074b783 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,6 +24,7 @@ * Eric Anholt * */ +#include #include #include "drmP.h" #include "drm.h" @@ -592,6 +593,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->edp.bpp = 18; } +static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Falling back to manually reading VBT from " + "VBIOS ROM for %s\n", + id->ident); + return 1; +} + +static const struct dmi_system_id intel_no_opregion_vbt[] = { + { + .callback = intel_no_opregion_vbt_callback, + .ident = "ThinkCentre A57", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), + }, + }, + { } +}; + /** * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device @@ -612,7 +633,7 @@ intel_parse_bios(struct drm_device *dev) init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? */ - if (dev_priv->opregion.vbt) { + if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; if (memcmp(vbt->signature, "$VBT", 4) == 0) { DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fed87d6a5b9..2e0c24d42ed 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2696,7 +2696,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) * as some pre-programmed values are broken, * e.g. x201. 
*/ - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); + if (IS_IVYBRIDGE(dev)) + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | + PF_PIPE_SEL_IVB(pipe)); + else + I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } @@ -2894,6 +2898,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + u32 pctl; if (!intel_crtc->active) return; @@ -2910,6 +2915,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_disable_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); + + /* Disable panel fitter if it is on this pipe. */ + pctl = I915_READ(PFIT_CONTROL); + if ((pctl & PFIT_ENABLE) && + ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) + I915_WRITE(PFIT_CONTROL, 0); + intel_disable_pll(dev_priv, pipe); intel_crtc->active = false; @@ -5265,7 +5277,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) int i; /* The clocks have to be on to load the palette. */ - if (!crtc->enabled) + if (!crtc->enabled || !intel_crtc->active) return; /* use legacy palette for Ironlake */ @@ -6495,8 +6507,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj; + struct drm_framebuffer *old_fb = crtc->fb; + struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags; @@ -6508,15 +6520,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->dev = crtc->dev; - intel_fb = to_intel_framebuffer(crtc->fb); - work->old_fb_obj = intel_fb->obj; + work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; INIT_WORK(&work->work, intel_unpin_work_fn); + ret = drm_vblank_get(dev, intel_crtc->pipe); + if (ret) + goto free_work; + /* We borrow the event spin lock for protecting unpin_work */ spin_lock_irqsave(&dev->event_lock, flags); if (intel_crtc->unpin_work) { spin_unlock_irqrestore(&dev->event_lock, flags); kfree(work); + drm_vblank_put(dev, intel_crtc->pipe); DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); return -EBUSY; @@ -6524,9 +6540,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->unpin_work = work; spin_unlock_irqrestore(&dev->event_lock, flags); - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - mutex_lock(&dev->struct_mutex); /* Reference the objects for the scheduled work. 
*/ @@ -6535,10 +6548,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->fb = fb; - ret = drm_vblank_get(dev, intel_crtc->pipe); - if (ret) - goto cleanup_objs; - work->pending_flip_obj = obj; work->enable_stall_check = true; @@ -6560,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_pending: atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); -cleanup_objs: + crtc->fb = old_fb; drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); @@ -6569,6 +6578,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->unpin_work = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); + drm_vblank_put(dev, intel_crtc->pipe); +free_work: kfree(work); return ret; @@ -6579,6 +6590,13 @@ static void intel_sanitize_modesetting(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg, val; + int i; + + /* Clear any frame start delays used for debugging left by the BIOS */ + for_each_pipe(i) { + reg = PIPECONF(i); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + } if (HAS_PCH_SPLIT(dev)) return; @@ -7401,6 +7419,10 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); + /* WaDisableHiZPlanesWhenMSAAEnabled */ + I915_WRITE(_3D_CHICKEN, + _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); + I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); @@ -7445,6 +7467,18 @@ static void gen6_init_clock_gating(struct drm_device *dev) DISPPLANE_TRICKLE_FEED_DISABLE); } +static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) +{ + uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); + + reg &= ~GEN7_FF_SCHED_MASK; + reg |= GEN7_FF_TS_SCHED_HW; + reg |= GEN7_FF_VS_SCHED_HW; + reg |= GEN7_FF_DS_SCHED_HW; + + I915_WRITE(GEN7_FF_THREAD_MODE, reg); +} + static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -7457,8 +7491,28 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); + /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. + * This implements the WaDisableRCZUnitClockGating workaround. + */ + I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); + /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. 
*/ + I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, + GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); + + /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ + I915_WRITE(GEN7_L3CNTLREG1, + GEN7_WA_FOR_GEN7_L3_CONTROL); + I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, + GEN7_WA_L3_CHICKEN_MODE); + + /* This is required by WaCatErrorRejectionIssue */ + I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); + for_each_pipe(pipe) I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | @@ -7545,6 +7599,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) static void cpt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -7554,6 +7609,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); } static void ironlake_teardown_rc6(struct drm_device *dev) @@ -7571,6 +7629,8 @@ static void ironlake_teardown_rc6(struct drm_device *dev) drm_gem_object_unreference(&dev_priv->pwrctx->base); dev_priv->pwrctx = NULL; } + + gen7_setup_fixed_func_scheduler(dev_priv); } static void ironlake_disable_rc6(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 14264a8c03e..bf9fea94161 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1554,6 +1554,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); } + DP &= ~DP_AUDIO_OUTPUT_ENABLE; I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9ffa61eb4d7..58b54ff3610 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -204,7 +204,7 @@ struct dip_infoframe { uint16_t bottom_bar_start; uint16_t left_bar_end; uint16_t right_bar_start; - } avi; + } __attribute__ ((packed)) avi; uint8_t payload[27]; } __attribute__ ((packed)) body; } __attribute__((packed)); @@ -330,7 +330,7 @@ extern int intel_framebuffer_init(struct drm_device *dev, struct drm_i915_gem_object *obj); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); - +extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); extern void intel_prepare_page_flip(struct drm_device *dev, int plane); extern void intel_finish_page_flip(struct drm_device *dev, int pipe); extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 6eda1b51c63..8ac91b80e15 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -371,6 +371,7 @@ void intel_dvo_init(struct drm_device *dev) const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; struct i2c_adapter *i2c; int gpio; + bool dvoinit; /* Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined @@ -390,7 +391,17 @@ void intel_dvo_init(struct drm_device *dev) i2c = &dev_priv->gmbus[gpio].adapter; intel_dvo->dev = *dvo; - if 
(!dvo->dev_ops->init(&intel_dvo->dev, i2c)) + + /* GMBUS NAK handling seems to be unstable, hence let the + * transmitter detection run in bit banging mode for now. + */ + intel_gmbus_force_bit(i2c, true); + + dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c); + + intel_gmbus_force_bit(i2c, false); + + if (!dvoinit) continue; intel_encoder->type = INTEL_OUTPUT_DVO; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index ec49bae7338..d0ce34b78cc 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -257,6 +257,16 @@ void intel_fbdev_fini(struct drm_device *dev) kfree(dev_priv->fbdev); dev_priv->fbdev = NULL; } + +void intel_fbdev_set_suspend(struct drm_device *dev, int state) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + if (!dev_priv->fbdev) + return; + + fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); +} + MODULE_LICENSE("GPL and additional rights"); void intel_fb_output_poll_changed(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index aa0a8e83142..918bac898ee 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -158,6 +158,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; + u32 enable_bits = SDVO_ENABLE; + + if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) + enable_bits |= SDVO_AUDIO_ENABLE; temp = I915_READ(intel_hdmi->sdvox_reg); @@ -170,9 +174,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) } if (mode != DRM_MODE_DPMS_ON) { - temp &= ~SDVO_ENABLE; + temp &= ~enable_bits; } else { - temp |= SDVO_ENABLE; + temp |= enable_bits; } I915_WRITE(intel_hdmi->sdvox_reg, temp); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b28f7bd9f88..09881ac5065 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -712,6 +712,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i45GMx-I", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), + }, + }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", @@ -735,6 +743,38 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "MSI Wind Box DC500", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Supermicro X7SPA-H", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Fujitsu Esprimo Q900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index d2c71042290..e7a97b5863a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c 
+++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -413,6 +413,25 @@ static void intel_didl_outputs(struct drm_device *dev) goto end; } +static void intel_setup_cadls(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + int i = 0; + u32 disp_id; + + /* Initialize the CADL field by duplicating the DIDL values. + * Technically, this is not always correct as display outputs may exist, + * but not be active. This initialization is necessary for some Clevo + * laptops that check this field before processing the brightness and + * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if + * there are fewer than eight devices. */ + do { + disp_id = ioread32(&opregion->acpi->didl[i]); + iowrite32(disp_id, &opregion->acpi->cadl[i]); + } while (++i < 8 && disp_id != 0); +} + void intel_opregion_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -422,8 +441,10 @@ void intel_opregion_init(struct drm_device *dev) return; if (opregion->acpi) { - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_didl_outputs(dev); + intel_setup_cadls(dev); + } /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 9e2959bc91c..1fe7c073fa8 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -428,9 +428,17 @@ static int intel_overlay_off(struct intel_overlay *overlay) OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - OUT_RING(flip_addr); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + if (IS_I830(dev)) { + /* Workaround: Don't disable the overlay fully, since otherwise + * it dies on the next OVERLAY_ON cmd. */ + OUT_RING(MI_NOOP); + OUT_RING(MI_NOOP); + OUT_RING(MI_NOOP); + } else { + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + OUT_RING(flip_addr); + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + } ADVANCE_LP_RING(); return intel_overlay_do_wait_request(overlay, request, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1f61fc7b754..3bd85f7e390 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -150,8 +150,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - /* Initialize the ring. */ - I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -177,6 +175,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_REPORT_64K | RING_VALID); @@ -863,7 +866,7 @@ int intel_init_ring_buffer(struct drm_device *dev, * of the buffer. 
*/ ring->effective_size = ring->size; - if (IS_I830(ring->dev)) + if (IS_I830(ring->dev) || IS_845G(ring->dev)) ring->effective_size -= 128; return 0; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d893..d1141e83356 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -724,6 +724,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; uint16_t h_sync_offset, v_sync_offset; + int mode_clock; width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; @@ -738,7 +739,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; - dtd->part1.clock = mode->clock / 10; + mode_clock = mode->clock; + mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; + mode_clock /= 10; + dtd->part1.clock = mode_clock; + dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | @@ -757,10 +762,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, ((v_sync_len & 0x30) >> 4); dtd->part2.dtd_flags = 0x18; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE; if (mode->flags & DRM_MODE_FLAG_PHSYNC) - dtd->part2.dtd_flags |= 0x2; + dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE; if (mode->flags & DRM_MODE_FLAG_PVSYNC) - dtd->part2.dtd_flags |= 0x4; + dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE; dtd->part2.sdvo_flags = 0; dtd->part2.v_sync_off_high = v_sync_offset & 0xc0; @@ -794,9 +801,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->clock = dtd->part1.clock * 10; mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); - if (dtd->part2.dtd_flags & 0x2) + if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) mode->flags |= DRM_MODE_FLAG_PHSYNC; - if (dtd->part2.dtd_flags & 0x4) + if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) mode->flags |= DRM_MODE_FLAG_PVSYNC; } @@ -852,31 +861,38 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) } #endif -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) +static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, + unsigned if_index, uint8_t tx_rate, + uint8_t *data, unsigned length) { - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint8_t tx_rate = SDVO_HBUF_TX_VSYNC; - uint8_t set_buf_index[2] = { 1, 0 }; - uint64_t *data = (uint64_t *)&avi_if; - unsigned i; - - intel_dip_infoframe_csum(&avi_if); + uint8_t set_buf_index[2] = { if_index, 0 }; + uint8_t hbuf_size, tmp[8]; + int i; if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2)) return false; - for (i = 0; i < sizeof(avi_if); i += 8) { + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + &hbuf_size, 1)) + return false; + + /* Buffer size is 0 based, hooray! 
*/ + hbuf_size++; + + DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + if_index, length, hbuf_size); + + for (i = 0; i < hbuf_size; i += 8) { + memset(tmp, 0, 8); + if (i < length) + memcpy(tmp, data + i, min_t(unsigned, 8, length - i)); + if (!intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, - data, 8)) + tmp, 8)) return false; - data++; } return intel_sdvo_set_value(intel_sdvo, @@ -884,6 +900,28 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) &tx_rate, 1); } +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; + + intel_dip_infoframe_csum(&avi_if); + + /* sdvo spec says that the ecc is handled by the hw, and it looks like + * we must not send the ecc field, either. */ + memcpy(sdvo_data, &avi_if, 3); + sdvo_data[3] = avi_if.checksum; + memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + + return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, + SDVO_HBUF_TX_VSYNC, + sdvo_data, sizeof(sdvo_data)); +} + static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) { struct intel_sdvo_tv_format format; @@ -990,7 +1028,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; - struct intel_sdvo_dtd input_dtd; + struct intel_sdvo_dtd input_dtd, output_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; @@ -1015,20 +1053,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo->attached_output)) return; - /* We have tried to get input timing in mode_fixup, and filled into - * adjusted_mode. - */ - if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { - input_dtd = intel_sdvo->input_dtd; - } else { - /* Set the output timing to the screen */ - if (!intel_sdvo_set_target_output(intel_sdvo, - intel_sdvo->attached_output)) - return; - - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); - } + /* lvds has a special fixed output timing. */ + if (intel_sdvo->is_lvds) + intel_sdvo_get_dtd_from_mode(&output_dtd, + intel_sdvo->sdvo_lvds_fixed_mode); + else + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) @@ -1046,6 +1077,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, !intel_sdvo_set_tv_format(intel_sdvo)) return; + /* We have tried to get input timing in mode_fixup, and filled into + * adjusted_mode. + */ + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { @@ -1059,15 +1094,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { - sdvox = 0; + /* The real mode polarity is set by the SDVO commands, using + * struct intel_sdvo_dtd. 
*/ + sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; if (intel_sdvo->is_hdmi) sdvox |= intel_sdvo->color_range; if (INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_BORDER_ENABLE; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - sdvox |= SDVO_VSYNC_ACTIVE_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { sdvox = I915_READ(intel_sdvo->sdvo_reg); switch (intel_sdvo->sdvo_reg) { diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index 4f4e23bc2d1..50bebc36404 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -61,6 +61,11 @@ struct intel_sdvo_caps { u16 output_flags; } __attribute__((packed)); +/* Note: SDVO detailed timing flags match EDID misc flags. */ +#define DTD_FLAG_HSYNC_POSITIVE (1 << 1) +#define DTD_FLAG_VSYNC_POSITIVE (1 << 2) +#define DTD_FLAG_INTERLACE (1 << 7) + /** This matches the EDID DTD structure, more or less */ struct intel_sdvo_dtd { struct { @@ -703,6 +708,8 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 #define SDVO_CMD_SET_HBUF_INDEX 0x93 + #define SDVO_HBUF_INDEX_ELD 0 + #define SDVO_HBUF_INDEX_AVI_IF 1 #define SDVO_CMD_GET_HBUF_INDEX 0x94 #define SDVO_CMD_GET_HBUF_INFO 0x95 #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 113e4e7264c..2136e6bc893 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-M", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */ @@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-443", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */ @@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = { { .name = "NTSC-J", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = { { .name = "PAL-M", .clock = 108000, - .refresh = 29970, + .refresh = 59940, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL-N", .clock = 108000, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = { /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ .name = "PAL", .clock = 108000, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_8X, .component_only = 0, @@ -821,7 +821,7 @@ static const struct tv_mode tv_modes[] = { { .name = "1080i@50Hz", .clock = 148800, - .refresh = 25000, + .refresh = 50000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, @@ -847,7 +847,7 @@ static const struct tv_mode tv_modes[] = { { .name = "1080i@60Hz", .clock = 148800, - .refresh = 30000, + .refresh = 60000, .oversample = TV_OVERSAMPLE_2X, .component_only = 1, @@ -1301,6 +1301,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv, I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); I915_WRITE(TV_CTL, save_tv_ctl); + POSTING_READ(TV_CTL); + + 
/* For unknown reasons the hw barfs if we don't do this vblank wait. */ + intel_wait_for_vblank(intel_tv->base.base.dev, + to_intel_crtc(intel_tv->base.base.crtc)->pipe); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2ad49cbf7c8..5fb98de0c57 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1075,7 +1075,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nvbo->placement.fpfn = 0; nvbo->placement.lpfn = dev_priv->fb_mappable_pages; - nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); return nouveau_bo_validate(nvbo, false, true, false); } diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 39aee6d4daf..ea71f78fb55 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -487,7 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev) nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; ret = drm_fb_helper_init(dev, &nfbdev->helper, - nv_two_heads(dev) ? 2 : 1, 4); + dev->mode_config.num_crtc, 4); if (ret) { kfree(nfbdev); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b52e4601824..cee78b26f60 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -314,6 +314,25 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv, return 0; } +static int +validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo) +{ + struct nouveau_fence *fence = NULL; + int ret = 0; + + spin_lock(&nvbo->bo.bdev->fence_lock); + if (nvbo->bo.sync_obj) + fence = nouveau_fence_ref(nvbo->bo.sync_obj); + spin_unlock(&nvbo->bo.bdev->fence_lock); + + if (fence) { + ret = nouveau_fence_sync(fence, chan); + nouveau_fence_unref(&fence); + } + + return ret; +} + static int validate_list(struct nouveau_channel *chan, struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) @@ -327,7 +346,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, list_for_each_entry(nvbo, list, entry) { struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); + ret = validate_sync(chan, nvbo); if (unlikely(ret)) { NV_ERROR(dev, "fail pre-validate sync\n"); return ret; @@ -350,7 +369,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, return ret; } - ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan); + ret = validate_sync(chan, nvbo); if (unlikely(ret)) { NV_ERROR(dev, "fail post-validate sync\n"); return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index e000455e06d..2d6bfd03dc1 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -209,7 +209,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); if (blue == 0x18) { - NV_INFO(dev, "Load detected on head A\n"); + NV_DEBUG(dev, "Load detected on head A\n"); return connector_status_connected; } @@ -323,7 +323,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) if (nv17_dac_sample_load(encoder) & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { - NV_INFO(dev, "Load detected on output %c\n", + NV_DEBUG(dev, "Load detected on 
output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; } else { @@ -398,7 +398,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } @@ -447,7 +447,7 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on vga encoder (output %d)\n", mode, nv_encoder->dcb->index); nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 12098bf839c..752440c7a3d 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -468,7 +468,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } @@ -511,7 +511,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", mode, nv_encoder->dcb->index); if (was_powersaving && is_powersaving_dpms(mode)) @@ -556,7 +556,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) return; nv_encoder->last_dpms = mode; - NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", mode, nv_encoder->dcb->index); nv04_dfp_update_backlight(encoder, mode); diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 3eb605ddfd0..4de1fbeb849 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -69,7 +69,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) struct nv04_mode_state *state = &dev_priv->mode_reg; uint8_t crtc1A; - NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", + NV_DEBUG(dev, "Setting dpms mode %d on TV encoder (output %d)\n", mode, nv_encoder->dcb->index); state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); @@ -162,7 +162,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder) helper->dpms(encoder, DRM_MODE_DPMS_ON); - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", + NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 9a0aee2f065..6b84d27924e 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1301,8 +1301,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) int atom_asic_init(struct atom_context *ctx) { + struct radeon_device *rdev = ctx->card->dev->dev_private; int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); uint32_t ps[16]; + int ret; + memset(ps, 0, 64); ps[0] = cpu_to_le32(CU32(hwi + 
ATOM_FWI_DEFSCLK_PTR)); @@ -1312,7 +1315,17 @@ int atom_asic_init(struct atom_context *ctx) if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) return 1; - return atom_execute_table(ctx, ATOM_CMD_INIT, ps); + ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); + if (ret) + return ret; + + memset(ps, 0, 64); + + if (rdev->family < CHIP_R600) { + if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) + atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); + } + return ret; } void atom_destroy(struct atom_context *ctx) @@ -1371,10 +1384,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); DRM_DEBUG("atom firmware requested %08x %dkb\n", - firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, - firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); + le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), + le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); - usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; } ctx->scratch_size_bytes = 0; if (usage_bytes == 0) diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 93cfe2086ba..25fea631dad 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -44,6 +44,7 @@ #define ATOM_CMD_SETSCLK 0x0A #define ATOM_CMD_SETMCLK 0x0B #define ATOM_CMD_SETPCLK 0x0C +#define ATOM_CMD_SPDFANCNTL 0x39 #define ATOM_DATA_FWI_PTR 0xC #define ATOM_DATA_IIO_PTR 0x32 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9541995e4b2..071ded119eb 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1173,7 +1173,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, - crtc->mode.vdisplay); + target_fb->height); x &= ~3; y &= ~1; WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, @@ -1342,7 +1342,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, - crtc->mode.vdisplay); + target_fb->height); x &= ~3; y &= ~1; WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 3b77ad60ed5..efc2b214b34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -22,6 +22,7 @@ * * Authors: Dave Airlie * Alex Deucher + * Jerome Glisse */ #include "drmP.h" #include "radeon_drm.h" @@ -624,7 +625,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, link_status, DP_LINK_STATUS_SIZE, 100); if (ret <= 0) { - DRM_ERROR("displayport link status failed\n"); return false; } @@ -797,8 +797,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) else mdelay(dp_info->rd_interval * 4); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + DRM_ERROR("displayport link status failed\n"); break; + } if 
(dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { clock_recovery = true; @@ -860,8 +862,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) else mdelay(dp_info->rd_interval * 4); - if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) + if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { + DRM_ERROR("displayport link status failed\n"); break; + } if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { channel_eq = true; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index fe052c618ae..d3264b9e3b6 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -330,6 +330,16 @@ void evergreen_hpd_init(struct radeon_device *rdev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); @@ -926,6 +936,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if ((rdev->family == CHIP_JUNIPER) || + (rdev->family == CHIP_CYPRESS) || + (rdev->family == CHIP_HEMLOCK) || + (rdev->family == CHIP_BARTS)) + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); } WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); @@ -1014,24 +1029,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - save->vga_control[0] = RREG32(D1VGA_CONTROL); - save->vga_control[1] = RREG32(D2VGA_CONTROL); save->vga_render_control = RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); - save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); - save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); - save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); @@ -1076,6 +1075,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav WREG32(EVERGREEN_D5VGA_CONTROL, 0); WREG32(EVERGREEN_D6VGA_CONTROL, 0); } + /* wait for the MC to settle */ + udelay(100); } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -1142,47 +1143,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s 
/* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(D1VGA_CONTROL, save->vga_control[0]); - WREG32(D2VGA_CONTROL, save->vga_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); - WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); - WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -2064,9 +2024,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); WREG32(GC_USER_RB_BACKEND_DISABLE, rb); WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); - } + } - grbm_gfx_index |= SE_BROADCAST_WRITES; + grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES; WREG32(GRBM_GFX_INDEX, grbm_gfx_index); WREG32(RLC_GFX_INDEX, grbm_gfx_index); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b7b2714f0b3..6078ae4cc16 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -230,6 +230,7 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C +#define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index b94d871487e..d94f440f137 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -681,9 +681,7 @@ int r100_irq_process(struct radeon_device *rdev) WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); break; default: - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; - WREG32(RADEON_MSI_REARM_EN, msi_rearm); - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); + 
WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); break; } } @@ -2069,6 +2067,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev) void r100_bm_disable(struct radeon_device *rdev) { u32 tmp; + u16 tmp16; /* disable bus mastering */ tmp = RREG32(R_000030_BUS_CNTL); @@ -2079,8 +2078,8 @@ void r100_bm_disable(struct radeon_device *rdev) WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040); tmp = RREG32(RADEON_BUS_CNTL); mdelay(1); - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); - pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); + pci_read_config_word(rdev->pdev, 0x4, &tmp16); + pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB); mdelay(1); } diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index c5c2742e414..a12f3738111 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); for (i = 0; i < nr; ++i) { - if (DRM_COPY_FROM_USER_UNCHECKED + if (DRM_COPY_FROM_USER (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); return -EFAULT; diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index 2d1f6c5ee2a..73e2c7c6edb 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c @@ -313,6 +313,10 @@ const u32 r6xx_default_state[] = 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, /* VGT_VTX_CNT_EN */ + 0xc0016900, + 0x000000d4, + 0x00000000, /* SX_MISC */ + 0xc0016900, 0x000002c8, 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ @@ -625,6 +629,10 @@ const u32 r7xx_default_state[] = 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, /* VGT_VTX_CNT_EN */ + 0xc0016900, + 0x000000d4, + 0x00000000, /* SX_MISC */ + 0xc0016900, 0x000002c8, 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f5ac7e788d8..c45d92191fd 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe( frame[0xD] = (right_bar >> 8); r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame); + /* Our header values (type, version, length) should be alright, Intel + * is using the same. Checksum function also seems to be OK, it works + * fine for audio infoframe. However calculated value is always lower + * by 2 in comparison to fglrx. It breaks displaying anything in case + * of TVs that strictly check the checksum. Hack it manually here to + * workaround this issue. 
*/ + frame[0x0] += 2; WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0, frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index bd2f33e5c91..bc6b64fe0e0 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, PCI_VENDOR_ID_DELL, 0x00e3, 2}, - /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ + /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, PCI_VENDOR_ID_DELL, 0x0149, 1}, + /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ + { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_IBM, 0x0531, 1}, /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 0x1025, 0x0061, 1}, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 3dedaa07aac..4d81e961239 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -253,13 +253,10 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, * rv515 */ struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; u32 vga_render_control; u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; }; + int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -387,11 +384,10 @@ void r700_cp_fini(struct radeon_device *rdev); * evergreen */ struct evergreen_mc_save { - u32 vga_control[6]; u32 vga_render_control; u32 vga_hdp_control; - u32 crtc_control[6]; }; + void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index a098edcf662..b1aade04a12 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -480,10 +480,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, */ if ((dev->pdev->device == 0x9498) && (dev->pdev->subsystem_vendor == 0x1682) && - (dev->pdev->subsystem_device == 0x2452)) { + (dev->pdev->subsystem_device == 0x2452) && + (i2c_bus->valid == false) && + !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) { struct radeon_device *rdev = dev->dev_private; *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); } + + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + *line_mux = 0x3103; + } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + } + } + + return true; } @@ -2015,6 +2031,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) num_modes = power_info->info.ucNumOfPowerModeEntries; 
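
For the AVI InfoFrame workaround in the r600_hdmi.c hunk above, the standalone sketch below shows the usual CEA-861 checksum rule the driver's helper is expected to follow: every byte of the frame, header, checksum slot and payload together, must sum to zero modulo 256. The patch then biases the stored checksum byte by 2 to match what fglrx programs. Function and variable names here are illustrative, not taken from the driver.

/* Standalone sketch, not driver code: CEA-861 InfoFrame checksum rule. */
#include <stdio.h>

static unsigned char infoframe_checksum(unsigned char type,
					unsigned char version,
					unsigned char length,
					const unsigned char *payload)
{
	unsigned int sum = type + version + length;
	unsigned int i;

	for (i = 0; i < length; i++)
		sum += payload[i];

	/* pick the byte that makes the whole frame sum to 0 mod 256 */
	return (unsigned char)(0x100 - (sum & 0xff));
}

int main(void)
{
	unsigned char avi[13] = { 0 };	/* 13-byte AVI payload, all zero here */

	/* 0x82 / 0x02 / 0x0d are the AVI InfoFrame type, version and length */
	printf("checksum = 0x%02x\n", infoframe_checksum(0x82, 0x02, 0x0d, avi));
	return 0;
}

A sink that strictly validates the frame recomputes this sum on receipt, which is why an off-by-two checksum can blank the display entirely.
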
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + if (num_modes == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; @@ -2385,6 +2403,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + if (power_info->pplib.ucNumStates == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2469,6 +2489,8 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info_array = (struct NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + if (state_array->ucNumEntries == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2545,7 +2567,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) default: break; } - } else { + } + + if (state_index == 0) { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { /* add the default mode */ diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 859df6b5bca..98fb3d73f36 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -958,6 +958,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct found = 1; } + /* quirks */ + /* Radeon 9100 (R200) */ + if ((dev->pdev->device == 0x514D) && + (dev->pdev->subsystem_vendor == 0x174B) && + (dev->pdev->subsystem_device == 0x7149)) { + /* vbios value is bad, use the default */ + found = 0; + } + if (!found) /* fallback to defaults */ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); @@ -2338,6 +2347,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 1), ATOM_DEVICE_CRT1_SUPPORT); } + /* RV100 board with external TDMS bit mis-set. + * Actually uses internal TMDS, clear the bit. + */ + if (dev->pdev->device == 0x5159 && + dev->pdev->subsystem_vendor == 0x1014 && + dev->pdev->subsystem_device == 0x029A) { + tmp &= ~(1 << 4); + } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 2109c17a982..f1a1e8aa4da 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -66,14 +66,33 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* just deal with DP (not eDP) here. 
*/ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { - int saved_dpms = connector->dpms; - - /* Only turn off the display it it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - else if (radeon_dp_needs_link_train(radeon_connector)) - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - connector->dpms = saved_dpms; + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + /* if existing sink type was not DP no need to retrain */ + if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) + return; + + /* first get sink type as it may be reset after (un)plug */ + dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); + /* don't do anything if sink is not display port, i.e., + * passive dp->(dvi|hdmi) adaptor + */ + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { + int saved_dpms = connector->dpms; + /* Only turn off the display if it's physically disconnected */ + if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + } else if (radeon_dp_needs_link_train(radeon_connector)) { + /* set it to OFF so that drm_helper_connector_dpms() + * won't return immediately since the current state + * is ON at this point. + */ + connector->dpms = DRM_MODE_DPMS_OFF; + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } + connector->dpms = saved_dpms; + } } } @@ -990,6 +1009,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) encoder = obj_to_encoder(obj); + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && + encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) + continue; + encoder_funcs = encoder->helper_private; if (encoder_funcs->detect) { if (ret != connector_status_connected) { @@ -1016,6 +1039,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) * cases the DVI port is actually a virtual KVM port connected to the service * processor. */ +out: if ((!rdev->is_atom_bios) && (ret == connector_status_disconnected) && rdev->mode_info.bios_hardcoded_edid_size) { @@ -1023,7 +1047,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = connector_status_connected; } -out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); return ret; diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index f59a6823301..72f749d56c1 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, uint32_t height) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_device *rdev = crtc->dev->dev_private; struct drm_gem_object *obj; + struct radeon_bo *robj; uint64_t gpu_addr; int ret; @@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, return -ENOENT; } - ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); + robj = gem_to_radeon_bo(obj); + ret = radeon_bo_reserve(robj, false); + if (unlikely(ret != 0)) + goto fail; + /* Only 27 bit offset for legacy cursor */ + ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, + ASIC_IS_AVIVO(rdev) ? 
0 : 1 << 27, + &gpu_addr); + radeon_bo_unreserve(robj); if (ret) goto fail; @@ -181,7 +191,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, radeon_crtc->cursor_height = height; radeon_lock_cursor(crtc, true); - /* XXX only 27 bit offset for legacy cursor */ radeon_set_cursor(crtc, obj, gpu_addr); radeon_show_cursor(crtc); radeon_lock_cursor(crtc, false); @@ -248,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (!(cursor_end & 0x7f)) w--; } - if (w <= 0) + if (w <= 0) { w = 1; + cursor_end = x - xorigin + w; + if (!(cursor_end & 0x7f)) { + x--; + WARN_ON_ONCE(x < 0); + } + } } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 440e6ecccc4..a275cf69de9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -223,8 +223,11 @@ int radeon_wb_init(struct radeon_device *rdev) if (radeon_no_wb == 1) rdev->wb.enabled = false; else { - /* often unreliable on AGP */ if (rdev->flags & RADEON_IS_AGP) { + /* often unreliable on AGP */ + rdev->wb.enabled = false; + } else if (rdev->family < CHIP_R300) { + /* often unreliable on pre-r300 */ rdev->wb.enabled = false; } else { rdev->wb.enabled = true; @@ -349,18 +352,17 @@ bool radeon_card_posted(struct radeon_device *rdev) uint32_t reg; /* first check CRTCs */ - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE4(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (reg & EVERGREEN_CRTC_MASTER_EN) - return true; - } else if (ASIC_IS_DCE4(rdev)) { - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + } if (reg & EVERGREEN_CRTC_MASTER_EN) return true; } else if (ASIC_IS_AVIVO(rdev)) { @@ -854,6 +856,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + drm_kms_helper_poll_disable(dev); + /* turn off display hw */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); @@ -940,6 +944,8 @@ int radeon_resume_kms(struct drm_device *dev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } + + drm_kms_helper_poll_enable(dev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ed085ce90e9..0896faed9fd 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1158,8 +1158,10 @@ radeon_user_framebuffer_create(struct drm_device *dev, } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); + } 
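
The restricted pin used for the legacy cursor in the radeon_cursor.c hunk above caps the buffer at a 1 << 27 byte offset (128 MiB), because pre-AVIVO cursor registers only carry a 27-bit VRAM offset. The sketch below shows the arithmetic that a max-offset cap implies for a page-based placement limit; the helper and constant names are assumptions for illustration, not the driver's own code.

/* Sketch: turn a byte cap into a "last page frame number" limit. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

static uint64_t restrict_lpfn(uint64_t max_offset, uint64_t current_lpfn)
{
	uint64_t lpfn = max_offset >> PAGE_SHIFT;

	/* only ever tighten an existing placement limit, never loosen it */
	if (!current_lpfn || lpfn < current_lpfn)
		current_lpfn = lpfn;
	return current_lpfn;
}

int main(void)
{
	uint64_t max_offset = 1ULL << 27;	/* 128 MiB legacy cursor limit */

	printf("max_offset = %llu MiB -> lpfn = %llu pages\n",
	       (unsigned long long)(max_offset >> 20),
	       (unsigned long long)restrict_lpfn(max_offset, 0));
	return 0;
}
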
radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6c111c1fa3f..c90425c439d 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -898,6 +898,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_chan *i2c; int ret; + /* don't add the mm_i2c bus unless hw_i2c is enabled */ + if (rec->mm_i2c && (radeon_hw_i2c == 0)) + return NULL; + i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); if (i2c == NULL) return NULL; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index fecc1aae382..1cfe7539fd9 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -131,12 +131,34 @@ static bool radeon_msi_ok(struct radeon_device *rdev) (rdev->pdev->subsystem_device == 0x30c2)) return true; + /* Dell RS690 only seems to work with MSIs. */ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x1028) && + (rdev->pdev->subsystem_device == 0x01fc)) + return true; + /* Dell RS690 only seems to work with MSIs. */ if ((rdev->pdev->device == 0x791f) && (rdev->pdev->subsystem_vendor == 0x1028) && (rdev->pdev->subsystem_device == 0x01fd)) return true; + /* Gateway RS690 only seems to work with MSIs. */ + if ((rdev->pdev->device == 0x791f) && + (rdev->pdev->subsystem_vendor == 0x107b) && + (rdev->pdev->subsystem_device == 0x0185)) + return true; + + /* try and enable MSIs by default on all RS690s */ + if (rdev->family == CHIP_RS690) + return true; + + /* RV515 seems to have MSI issues where it loses + * MSI rearms occasionally. This leads to lockups and freezes. + * disable it by default. + */ + if (rdev->family == CHIP_RV515) + return false; if (rdev->flags & RADEON_IS_IGP) { /* APUs work fine with MSIs */ if (rdev->family >= CHIP_PALM) diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2f46e0c8df5..a9068031ef5 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc enum drm_connector_status found = connector_status_disconnected; bool color = true; + /* just don't bother on RN50 those chip are often connected to remoting + * console hw and often we get failure to load detect those. So to make + * everyone happy report the encoder as always connected. 
+ */ + if (ASIC_IS_RN50(rdev)) { + return connector_status_connected; + } + /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); @@ -650,6 +658,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; WREG32(RADEON_DAC_CNTL, tmp); + tmp = dac_macro_cntl; tmp &= ~(RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B); @@ -973,11 +982,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; - if (tmds) { - if (tmds->i2c_bus) - radeon_i2c_destroy(tmds->i2c_bus); - } + /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 976c3b1b1b6..35da1b47419 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -204,7 +204,8 @@ void radeon_bo_unref(struct radeon_bo **bo) *bo = NULL; } -int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) +int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, + u64 *gpu_addr) { int r, i; @@ -212,6 +213,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) bo->pin_count++; if (gpu_addr) *gpu_addr = radeon_bo_gpu_offset(bo); + WARN_ON_ONCE(max_offset != 0); return 0; } radeon_ttm_placement_from_domain(bo, domain); @@ -219,6 +221,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) /* force to pin into visible video ram */ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; } + if (max_offset) { + u64 lpfn = max_offset >> PAGE_SHIFT; + + if (!bo->placement.lpfn) + bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; + + if (lpfn < bo->placement.lpfn) + bo->placement.lpfn = lpfn; + } for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); @@ -232,6 +243,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) return r; } +int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) +{ + return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); +} + int radeon_bo_unpin(struct radeon_bo *bo) { int r, i; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index ede6c13628f..7199c6ab027 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -144,6 +144,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); extern void radeon_bo_unref(struct radeon_bo **bo); extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); +extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, + u64 max_offset, u64 *gpu_addr); extern int radeon_bo_unpin(struct radeon_bo *bo); extern int radeon_bo_evict_vram(struct radeon_device *rdev); extern void radeon_bo_force_delete(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fabe89fa6a..8270a85d940 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ 
b/drivers/gpu/drm/radeon/radeon_pm.c @@ -535,7 +535,9 @@ void radeon_pm_suspend(struct radeon_device *rdev) void radeon_pm_resume(struct radeon_device *rdev) { /* set up the default clocks if the MC ucode is loaded */ - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if ((rdev->family >= CHIP_BARTS) && + (rdev->family <= CHIP_CAYMAN) && + rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -590,7 +592,9 @@ int radeon_pm_init(struct radeon_device *rdev) radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); /* set up the default clocks if the MC ucode is loaded */ - if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if ((rdev->family >= CHIP_BARTS) && + (rdev->family <= CHIP_CAYMAN) && + rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -841,7 +845,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct radeon_device *rdev = dev->dev_private; seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); + /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ + if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) + seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); + else + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index a37a1efdd22..2026c2d52c5 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev) void rs600_bm_disable(struct radeon_device *rdev) { - u32 tmp; + u16 tmp; /* disable bus mastering */ - pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp); + pci_read_config_word(rdev->pdev, 0x4, &tmp); pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB); mdelay(1); } @@ -698,9 +698,7 @@ int rs600_irq_process(struct radeon_device *rdev) WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM); break; default: - msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; - WREG32(RADEON_MSI_REARM_EN, msi_rearm); - WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); + WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); break; } } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 6613ee9ecca..d5f45b4c1be 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) { - save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); - save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); - save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); - save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); /* Stop all video */ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); @@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* Unlock host access */ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video 
state */ - WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); - WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); - WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); - WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 84cf82fcac8..51d20aa63d0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + if (rdev->family == CHIP_RV740) + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 79fa588e9ed..75380927e9c 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -174,6 +174,7 @@ #define MC_VM_MD_L1_TLB0_CNTL 0x2654 #define MC_VM_MD_L1_TLB1_CNTL 0x2658 #define MC_VM_MD_L1_TLB2_CNTL 0x265C +#define MC_VM_MD_L1_TLB3_CNTL 0x2698 #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 81b68508dc1..7632edb2f46 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1809,6 +1809,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) spin_unlock(&glob->lru_lock); (void) ttm_bo_cleanup_refs(bo, false, false, false); kref_put(&bo->list_kref, ttm_bo_release_list); + spin_lock(&glob->lru_lock); continue; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 96949b93d92..55d4b29330f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -147,6 +147,7 @@ static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; +MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); static int enable_fbdev; @@ -857,6 +858,11 @@ static void vmw_pm_complete(struct device *kdev) struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); + (void) vmw_read(dev_priv, SVGA_REG_ID); + mutex_unlock(&dev_priv->hw_mutex); + /** * Reclaim 3d reference held by fbdev and potentially * start fifo. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index dfe32e62bd9..8a38c91f4c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -313,7 +313,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, unsigned int *handle) { if (handle) - handle = 0; + *handle = 0; return 0; } diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c new file mode 100644 index 00000000000..b274ba25f9f --- /dev/null +++ b/drivers/gpu/ion/msm/ion_cp_common.c @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2011 Google, Inc + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "ion_cp_common.h" + +#define MEM_PROTECT_LOCK_ID 0x05 + +struct cp2_mem_chunks { + unsigned int *chunk_list; + unsigned int chunk_list_size; + unsigned int chunk_size; +} __attribute__ ((__packed__)); + +struct cp2_lock_req { + struct cp2_mem_chunks chunks; + unsigned int mem_usage; + unsigned int lock; +} __attribute__ ((__packed__)); + +int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks, + unsigned int chunk_size, + enum cp_mem_usage usage, + int lock) +{ + struct cp2_lock_req request; + + request.mem_usage = usage; + request.lock = lock; + + request.chunks.chunk_list = (unsigned int *)chunks; + request.chunks.chunk_list_size = nchunks; + request.chunks.chunk_size = chunk_size; + + return scm_call(SCM_SVC_CP, MEM_PROTECT_LOCK_ID, + &request, sizeof(request), NULL, 0); + +} + diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h new file mode 100644 index 00000000000..69dd19e6609 --- /dev/null +++ b/drivers/gpu/ion/msm/ion_cp_common.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef ION_CP_COMMON_H +#define ION_CP_COMMON_H + +#include +#include + +#define ION_CP_V1 1 +#define ION_CP_V2 2 + +#if defined(CONFIG_ION_MSM) +/* + * ion_cp2_protect_mem - secures memory via trustzone + * + * @chunks - physical address of the array containing the chunks to + * be locked down + * @nchunks - number of entries in the array + * @chunk_size - size of each memory chunk + * @usage - usage hint + * @lock - 1 for lock, 0 for unlock + * + * return value is the result of the scm call + */ +int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks, + unsigned int chunk_size, enum cp_mem_usage usage, + int lock); + +#else +static inline int ion_cp_change_chunks_state(unsigned long chunks, + unsigned int nchunks, unsigned int chunk_size, + enum cp_mem_usage usage, int lock) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c index aefb9cad40c..44c42e2ce0b 100644 --- a/drivers/gpu/ion/msm/msm_ion.c +++ b/drivers/gpu/ion/msm/msm_ion.c @@ -299,4 +299,3 @@ static void __exit msm_ion_exit(void) subsys_initcall(msm_ion_init); module_exit(msm_ion_exit); - diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h index 50b2745bed0..28b8dac57e1 100644 --- a/drivers/gpu/msm/a2xx_reg.h +++ b/drivers/gpu/msm/a2xx_reg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2002,2007-2013, The Linux Foundation. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -355,6 +355,7 @@ union reg_cp_rb_cntl { #define REG_RB_MODECONTROL 0x2208 #define REG_RB_SURFACE_INFO 0x2000 #define REG_RB_SAMPLE_POS 0x220a +#define REG_RB_BC_CONTROL 0x0F01 #define REG_SCRATCH_ADDR 0x01DD #define REG_SCRATCH_REG0 0x0578 diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index d38528a391e..e01bb605a34 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -113,10 +113,25 @@ static struct adreno_device device_3d0 = { }, .pfp_fw = NULL, .pm4_fw = NULL, - .wait_timeout = 10000, /* in milliseconds */ + .wait_timeout = 0, /* in milliseconds, 0 means disabled */ .ib_check_level = 0, }; +/* This set of registers are used for Hang detection + * If the values of these registers are same after + * KGSL_TIMEOUT_PART time, GPU hang is reported in + * kernel log. + */ +unsigned int hang_detect_regs[] = { + REG_RBBM_STATUS, + REG_CP_RB_RPTR, + REG_CP_IB1_BASE, + REG_CP_IB1_BUFSZ, + REG_CP_IB2_BASE, + REG_CP_IB2_BUFSZ, +}; + +const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs); /* * This is the master list of all GPU cores that are supported by this @@ -277,6 +292,12 @@ static void adreno_setstate(struct kgsl_device *device, struct kgsl_context *context; struct adreno_context *adreno_ctx = NULL; + /* + * Fix target freeze issue by adding TLB flush for each submit + * on A20X based targets. + */ + if (adreno_is_a20x(adreno_dev)) + flags |= KGSL_MMUFLAGS_TLBFLUSH; /* * If possible, then set the state via the command stream to avoid * a CPU idle. Otherwise, use the default setstate which uses register @@ -530,7 +551,9 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram) } kgsl_mh_start(device); - + /* Assign correct RBBM status register to hang detect regs + */ + hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status; if (kgsl_mmu_start(device)) goto error_clk_off; @@ -555,7 +578,10 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram) adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000); - adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442); + if (adreno_is_a200(adreno_dev)) + adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF); + else + adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442); if (adreno_is_a225(adreno_dev)) { /* Enable large instruction store for A225 */ @@ -718,7 +744,7 @@ adreno_recover_hang(struct kgsl_device *device, rb->timestamp = timestamp; /* wait for idle */ - ret = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + ret = adreno_idle(device); done: kgsl_sharedmem_writel(&device->memstore, KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), @@ -930,61 +956,93 @@ static inline void adreno_poke(struct kgsl_device *device) adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr); } -/* Caller must hold the device mutex. */ -int adreno_idle(struct kgsl_device *device, unsigned int timeout) +static int adreno_ringbuffer_drain(struct kgsl_device *device, + unsigned int *regs) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; + unsigned long wait; + unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + + if (!(rb->flags & KGSL_FLAGS_STARTED)) + return 0; + + /* + * The first time into the loop, wait for 100 msecs and kick wptr again + * to ensure that the hardware has updated correctly. 
After that, kick + * it periodically every KGSL_TIMEOUT_PART msecs until the timeout + * expires + */ + + wait = jiffies + msecs_to_jiffies(100); + + adreno_poke(device); + + do { + if (time_after(jiffies, wait)) { + adreno_poke(device); + + /* Check to see if the core is hung */ + if (adreno_hang_detect(device, regs)) + return -ETIMEDOUT; + + wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); + } + GSL_RB_GET_READPTR(rb, &rb->rptr); + + if (time_after(jiffies, timeout)) { + KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n", + rb->rptr, rb->wptr); + return -ETIMEDOUT; + } + } while (rb->rptr != rb->wptr); + + return 0; +} + +/* Caller must hold the device mutex. */ +int adreno_idle(struct kgsl_device *device) +{ unsigned int rbbm_status; - unsigned long wait_timeout = - msecs_to_jiffies(adreno_dev->wait_timeout); unsigned long wait_time; unsigned long wait_time_part; - unsigned int msecs; - unsigned int msecs_first; - unsigned int msecs_part; + unsigned int prev_reg_val[hang_detect_regs_count]; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2, 0x00000000, 0x80000000); - /* first, wait until the CP has consumed all the commands in - * the ring buffer - */ + retry: - if (rb->flags & KGSL_FLAGS_STARTED) { - msecs = adreno_dev->wait_timeout; - msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100; - msecs_part = (msecs - msecs_first + 3) / 4; - wait_time = jiffies + wait_timeout; - wait_time_part = jiffies + msecs_to_jiffies(msecs_first); - adreno_poke(device); - do { - if (time_after(jiffies, wait_time_part)) { - adreno_poke(device); - wait_time_part = jiffies + - msecs_to_jiffies(msecs_part); - } - GSL_RB_GET_READPTR(rb, &rb->rptr); - if (time_after(jiffies, wait_time)) { - KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n", - rb->rptr, rb->wptr); - goto err; - } - } while (rb->rptr != rb->wptr); - } + /* First, wait for the ringbuffer to drain */ + if (adreno_ringbuffer_drain(device, prev_reg_val)) + goto err; /* now, wait for the GPU to finish its operations */ - wait_time = jiffies + wait_timeout; + wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); + while (time_before(jiffies, wait_time)) { adreno_regread(device, REG_RBBM_STATUS, &rbbm_status); if (rbbm_status == 0x110) return 0; } + /* Dont wait for timeout, detect hang faster. 
+ */ + if (time_after(jiffies, wait_time_part)) { + wait_time_part = jiffies + + msecs_to_jiffies(KGSL_TIMEOUT_PART); + if ((adreno_hang_detect(device, prev_reg_val))) + goto err; +} + + err: KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n"); if (KGSL_STATE_DUMP_AND_RECOVER != device->state && !adreno_dump_and_recover(device)) { - wait_time = jiffies + wait_timeout; + wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); goto retry; } return -ETIMEDOUT; @@ -1024,7 +1082,7 @@ static int adreno_suspend_context(struct kgsl_device *device) /* switch to NULL ctxt */ if (adreno_dev->drawctxt_active != NULL) { adreno_drawctxt_switch(adreno_dev, NULL, 0); - status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + status = adreno_idle(device); } return status; @@ -1217,6 +1275,30 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device, __ret; \ }) + +unsigned int adreno_hang_detect(struct kgsl_device *device, + unsigned int *prev_reg_val) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int curr_reg_val[hang_detect_regs_count]; + unsigned int hang_detected = 1; + unsigned int i; + + if (!adreno_dev->fast_hang_detect) + return 0; + + for (i = 0; i < hang_detect_regs_count; i++) { + adreno_regread(device, hang_detect_regs[i], + &curr_reg_val[i]); + if (curr_reg_val[i] != prev_reg_val[i]) { + prev_reg_val[i] = curr_reg_val[i]; + hang_detected = 0; + } + } + + return hang_detected; +} + /* MUST be called with the device mutex held */ static int adreno_waittimestamp(struct kgsl_device *device, unsigned int timestamp, @@ -1227,12 +1309,16 @@ static int adreno_waittimestamp(struct kgsl_device *device, static uint io_cnt; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct kgsl_pwrctrl *pwr = &device->pwrctrl; - int retries; - unsigned int msecs_first; - unsigned int msecs_part; + int retries = 0; + + unsigned int time_elapsed = 0; + unsigned int prev_reg_val[hang_detect_regs_count]; + unsigned int wait; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); /* Don't wait forever, set a max value for now */ - if (msecs == -1) + if (msecs == KGSL_TIMEOUT_DEFAULT) msecs = adreno_dev->wait_timeout; if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) { @@ -1243,13 +1329,18 @@ static int adreno_waittimestamp(struct kgsl_device *device, goto done; } - /* Keep the first timeout as 100msecs before rewriting - * the WPTR. Less visible impact if the WPTR has not - * been updated properly. + /* + * Make the first timeout interval 100 msecs and then try to kick the + * wptr again. This helps to ensure the wptr is updated properly. If + * the requested timeout is less than 100 msecs, then wait 20msecs which + * is the minimum amount of time we can safely wait at 100HZ */ - msecs_first = (msecs <= 100) ? 
((msecs + 4) / 5) : 100; - msecs_part = (msecs - msecs_first + 3) / 4; - for (retries = 0; retries < 5; retries++) { + if (msecs == 0 || msecs >= 100) + wait = 100; + else + wait = 20; + + do { if (kgsl_check_timestamp(device, timestamp)) { /* if the timestamp happens while we're not * waiting, there's a chance that an interrupt @@ -1265,6 +1356,11 @@ static int adreno_waittimestamp(struct kgsl_device *device, if (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction) io = 0; + + if ((retries > 0) && + (adreno_hang_detect(device, prev_reg_val))) + goto hang_dump; + mutex_unlock(&device->mutex); /* We need to make sure that the process is * placed in wait-q before its condition is called @@ -1272,9 +1368,9 @@ static int adreno_waittimestamp(struct kgsl_device *device, status = kgsl_wait_event_interruptible_timeout( device->wait_queue, kgsl_check_interrupt_timestamp(device, - timestamp), - msecs_to_jiffies(retries ? - msecs_part : msecs_first), io); + timestamp), + msecs_to_jiffies(wait), io); + mutex_lock(&device->mutex); if (status > 0) { @@ -1286,7 +1382,15 @@ static int adreno_waittimestamp(struct kgsl_device *device, goto done; } /*this wait timed out*/ - } + + time_elapsed += wait; + wait = KGSL_TIMEOUT_PART; + + retries++; + + } while (!msecs || time_elapsed < msecs); + +hang_dump: /* Check if timestamp has retired here because we may have hit * recovery which can take some time and cause waiting threads @@ -1362,8 +1466,8 @@ static long adreno_ioctl(struct kgsl_device_private *dev_priv, static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq) { - gpu_freq /= 1000000; - return ticks / gpu_freq; + s64 ticksus = (s64)ticks*1000000; + return div_u64(ticksus, gpu_freq); } static void adreno_power_stats(struct kgsl_device *device, diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 9c75f6dc6fe..d50eec6cbeb 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -49,6 +49,12 @@ #define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50 +/* One cannot wait forever for the core to idle, so set an upper limit to the + * amount of time to wait for the core to go idle + */ + +#define ADRENO_IDLE_TIMEOUT (20 * 1000) + enum adreno_gpurev { ADRENO_REV_UNKNOWN = 0, ADRENO_REV_A200 = 200, @@ -78,10 +84,12 @@ struct adreno_device { unsigned int istore_size; unsigned int pix_shader_start; unsigned int ib_check_level; + unsigned int fast_hang_detect; }; struct adreno_gpudev { /* keeps track of when we need to execute the draw workaround code */ + unsigned int reg_rbbm_status; int ctx_switches_since_last_draw; int (*ctxt_create)(struct adreno_device *, struct adreno_context *); void (*ctxt_save)(struct adreno_device *, struct adreno_context *); @@ -124,7 +132,11 @@ extern const unsigned int a220_registers[]; extern const unsigned int a200_registers_count; extern const unsigned int a220_registers_count; -int adreno_idle(struct kgsl_device *device, unsigned int timeout); +extern unsigned int hang_detect_regs[]; +extern const unsigned int hang_detect_regs_count; + + +int adreno_idle(struct kgsl_device *device); void adreno_regread(struct kgsl_device *device, unsigned int offsetwords, unsigned int *value); void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords, @@ -143,6 +155,9 @@ void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain, int adreno_dump_and_recover(struct kgsl_device *device); +unsigned int adreno_hang_detect(struct kgsl_device *device, + unsigned int *prev_reg_val); + static inline int adreno_is_a200(struct 
adreno_device *adreno_dev) { return (adreno_dev->gpurev == ADRENO_REV_A200); diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c index 9efd587fa16..9b9fcc8b00a 100644 --- a/drivers/gpu/msm/adreno_a2xx.c +++ b/drivers/gpu/msm/adreno_a2xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -581,6 +581,12 @@ static void build_regsave_cmds(struct adreno_device *adreno_dev, *cmd++ = REG_TP0_CHICKEN; *cmd++ = tmp_ctx.reg_values[1]; + if (adreno_is_a20x(adreno_dev)) { + *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2); + *cmd++ = REG_RB_BC_CONTROL; + *cmd++ = tmp_ctx.reg_values[2]; + } + if (adreno_is_a22x(adreno_dev)) { unsigned int i; unsigned int j = 2; @@ -1107,6 +1113,12 @@ static void build_regrestore_cmds(struct adreno_device *adreno_dev, tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate); *cmd++ = 0x00000000; + if (adreno_is_a20x(adreno_dev)) { + *cmd++ = cp_type0_packet(REG_RB_BC_CONTROL, 1); + tmp_ctx.reg_values[2] = virt2gpu(cmd, &drawctxt->gpustate); + *cmd++ = 0x00000000; + } + if (adreno_is_a22x(adreno_dev)) { unsigned int i; unsigned int j = 2; @@ -1780,6 +1792,7 @@ void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot, int *remain, int hang); struct adreno_gpudev adreno_a2xx_gpudev = { + .reg_rbbm_status=REG_RBBM_STATUS, .ctxt_create = a2xx_drawctxt_create, .ctxt_save = a2xx_drawctxt_save, .ctxt_restore = a2xx_drawctxt_restore, diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 81d3419fc68..66935951e43 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -364,6 +364,11 @@ void adreno_debugfs_init(struct kgsl_device *device) debugfs_create_u32("ib_check", 0644, device->d_debugfs, &adreno_dev->ib_check_level); + /* By Default enable fast hang detection */ + adreno_dev->fast_hang_detect = 1; + debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs, + &adreno_dev->fast_hang_detect); + /* Create post mortem control files */ pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs); diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index 990af5f076a..4fdbebc963d 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -192,7 +192,7 @@ void adreno_drawctxt_destroy(struct kgsl_device *device, adreno_drawctxt_switch(adreno_dev, NULL, 0); } - adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + adreno_idle(device); kgsl_sharedmem_free(&drawctxt->gpustate); kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c index af4bb85b103..f06248e9b47 100644 --- a/drivers/gpu/msm/adreno_postmortem.c +++ b/drivers/gpu/msm/adreno_postmortem.c @@ -715,7 +715,7 @@ int adreno_postmortem_dump(struct kgsl_device *device, int manual) } if (device->state == KGSL_STATE_ACTIVE) - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); } KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X", diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index f9d9df1a03d..72f3b0bdfc1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -47,7 +47,16 @@ #define A225_PFP_FW "a225_pfp.fw" #define 
A225_PM4_FW "a225_pm4.fw" -static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) + +/* + * CP DEBUG settings for all cores: + * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control + * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF + */ + +#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25)) + +void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb) { BUG_ON(rb->wptr == 0); @@ -71,9 +80,12 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, unsigned int freecmds; unsigned int *cmds; uint cmds_gpu; - struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); - unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout); unsigned long wait_time; + unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT); + unsigned long wait_time_part; + unsigned int prev_reg_val[hang_detect_regs_count]; + + memset(prev_reg_val, 0, sizeof(prev_reg_val)); /* if wptr ahead, fill the remaining with NOPs */ if (wptr_ahead) { @@ -101,6 +113,7 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, } wait_time = jiffies + wait_timeout; + wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART); /* wait for space in ringbuffer */ while (1) { GSL_RB_GET_READPTR(rb, &rb->rptr); @@ -110,16 +123,34 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds, if (freecmds == 0 || freecmds > numcmds) break; + /* Dont wait for timeout, detect hang faster. + */ + if (time_after(jiffies, wait_time_part)) { + wait_time_part = jiffies + + msecs_to_jiffies(KGSL_TIMEOUT_PART); + if ((adreno_hang_detect(rb->device, + prev_reg_val))){ + KGSL_DRV_ERR(rb->device, + "Hang detected while waiting for freespace in" + "ringbuffer rptr: 0x%x, wptr: 0x%x\n", + rb->rptr, rb->wptr); + goto err; + } + } + if (time_after(jiffies, wait_time)) { KGSL_DRV_ERR(rb->device, "Timed out while waiting for freespace in ringbuffer " "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr); - if (!adreno_dump_and_recover(rb->device)) + goto err; + } + continue; +err: + if (!adreno_dump_and_recover(rb->device)) wait_time = jiffies + wait_timeout; else /* GPU is hung and we cannot recover */ BUG(); - } } } @@ -241,7 +272,7 @@ static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device) KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n", adreno_dev->pm4_fw[0]); - adreno_regwrite(device, REG_CP_DEBUG, 0x02000000); + adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT); adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0); for (i = 1; i < adreno_dev->pm4_fw_size; i++) adreno_regwrite(device, REG_CP_ME_RAM_DATA, @@ -433,7 +464,7 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram) adreno_ringbuffer_submit(rb); /* idle device to validate ME INIT */ - status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + status = adreno_idle(device); if (status == 0) rb->flags |= KGSL_FLAGS_STARTED; @@ -529,7 +560,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords); /* GPU may hang during space allocation, if thats the case the current * context may have hung the GPU */ - if (context->flags & CTXT_FLAGS_GPU_HANG) { + if (context && context->flags & CTXT_FLAGS_GPU_HANG) { KGSL_CTXT_WARN(rb->device, "Context %p caused a gpu hang. 
Will not accept commands for context %d\n", context, context->id); @@ -882,7 +913,7 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, * this is conservative but works reliably and is ok * even for performance simulations */ - adreno_idle(device, KGSL_TIMEOUT_DEFAULT); + adreno_idle(device); #endif return 0; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 7a2857c380f..39eac67e15a 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -417,7 +417,7 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state) break; case KGSL_STATE_ACTIVE: /* Wait for the device to become idle */ - device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT); + device->ftbl->idle(device); case KGSL_STATE_NAP: case KGSL_STATE_SLEEP: /* Get the completion ready to be waited upon. */ @@ -1917,6 +1917,10 @@ static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) nr = _IOC_NR(cmd); + nr = _IOC_NR(cmd); + + nr = _IOC_NR(cmd); + if (cmd & (IOC_IN | IOC_OUT)) { if (_IOC_SIZE(cmd) < sizeof(ustack)) uptr = ustack; diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index fdb4a6e2e07..ba3c3290c42 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -26,6 +26,7 @@ #define KGSL_TIMEOUT_NONE 0 #define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF +#define KGSL_TIMEOUT_PART 2000 /* 2 sec */ #define FIRST_TIMEOUT (HZ / 2) @@ -67,7 +68,7 @@ struct kgsl_functable { unsigned int offsetwords, unsigned int *value); void (*regwrite) (struct kgsl_device *device, unsigned int offsetwords, unsigned int value); - int (*idle) (struct kgsl_device *device, unsigned int timeout); + int (*idle) (struct kgsl_device *device); unsigned int (*isidle) (struct kgsl_device *device); int (*suspend_context) (struct kgsl_device *device); int (*start) (struct kgsl_device *device, unsigned int init_ram); @@ -262,9 +263,9 @@ static inline void kgsl_regwrite(struct kgsl_device *device, device->ftbl->regwrite(device, offsetwords, value); } -static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout) +static inline int kgsl_idle(struct kgsl_device *device) { - return device->ftbl->idle(device, timeout); + return device->ftbl->idle(device); } static inline unsigned int kgsl_gpuid(struct kgsl_device *device) diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c index 7458cf28914..b1d7f31987d 100644 --- a/drivers/gpu/msm/kgsl_gpummu.c +++ b/drivers/gpu/msm/kgsl_gpummu.c @@ -520,7 +520,8 @@ static void kgsl_gpummu_default_setstate(struct kgsl_device *device, return; if (flags & KGSL_MMUFLAGS_PTUPDATE) { - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + + kgsl_idle(device); gpummu_pt = device->mmu.hwpagetable->priv; kgsl_regwrite(device, MH_MMU_PT_BASE, gpummu_pt->base.gpuaddr); @@ -615,7 +616,7 @@ static int kgsl_gpummu_start(struct kgsl_device *device) kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config); /* idle device */ - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); /* enable axi interrupts */ kgsl_regwrite(device, MH_INTERRUPT_MASK, diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 81e237f110d..889d679bf1e 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -154,7 +154,7 @@ static void kgsl_iommu_setstate(struct kgsl_device *device, * specified page table */ if (mmu->hwpagetable != pagetable) { - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); kgsl_detach_pagetable_iommu_domain(mmu); mmu->hwpagetable = pagetable; if 
(mmu->hwpagetable) @@ -308,11 +308,11 @@ kgsl_iommu_get_current_ptbase(struct kgsl_device *device) { /* Current base is always the hwpagetables domain as we * do not use per process pagetables right not for iommu. - * This will change when we switch to per process pagetables. - */ + * This will change when we switch to per process pagetables.*/ return (unsigned int)device->mmu.hwpagetable->priv; } + struct kgsl_mmu_ops iommu_ops = { .mmu_init = kgsl_iommu_init, .mmu_close = kgsl_iommu_close, diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c index 975aa83781a..5dc5de31abe 100644 --- a/drivers/gpu/msm/kgsl_mmu.c +++ b/drivers/gpu/msm/kgsl_mmu.c @@ -526,7 +526,7 @@ void kgsl_mh_start(struct kgsl_device *device) struct kgsl_mh *mh = &device->mh; /* force mmu off to for now*/ kgsl_regwrite(device, MH_MMU_CONFIG, 0); - kgsl_idle(device, KGSL_TIMEOUT_DEFAULT); + kgsl_idle(device); /* define physical memory range accessible by the core */ kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base); diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index e9f21438eb0..800edb5a26e 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -64,6 +64,9 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, new_level >= pwr->thermal_pwrlevel && new_level != pwr->active_pwrlevel) { struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level]; + int diff = new_level - pwr->active_pwrlevel; + int d = (diff > 0) ? 1 : -1; + int level = pwr->active_pwrlevel; pwr->active_pwrlevel = new_level; if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) || (device->state == KGSL_STATE_NAP)) { @@ -73,9 +76,16 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device, * Idle the gpu core before changing the clock freq. */ if (pwr->idle_needed == true) - device->ftbl->idle(device, - KGSL_TIMEOUT_DEFAULT); - clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq); + device->ftbl->idle(device); + + /* Don't shift by more than one level at a time to + * avoid glitches. + */ + while (level != new_level) { + level += d; + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels[level].gpu_freq); + } } if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) { if (pwr->pcl) @@ -126,7 +136,7 @@ static int __gpuclk_store(int max, struct device *dev, if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq > pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq) kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel); - else if (!max) + else if (!max || (NULL == device->pwrscale.policy)) kgsl_pwrctrl_pwrlevel_change(device, i); done: @@ -346,23 +356,34 @@ void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, for (i = KGSL_MAX_CLKS - 1; i > 0; i--) if (pwr->grp_clks[i]) clk_disable(pwr->grp_clks[i]); + /* High latency clock maintenance. */ if ((pwr->pwrlevels[0].gpu_freq > 0) && - (requested_state != KGSL_STATE_NAP)) + (requested_state != KGSL_STATE_NAP)) { clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. gpu_freq); + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_unprepare(pwr->grp_clks[i]); + } kgsl_pwrctrl_busy_time(device, true); } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { trace_kgsl_clk(device, state); - if ((pwr->pwrlevels[0].gpu_freq > 0) && - (device->state != KGSL_STATE_NAP)) - clk_set_rate(pwr->grp_clks[0], - pwr->pwrlevels[pwr->active_pwrlevel]. + /* High latency clock maintenance. 
*/ + if (device->state != KGSL_STATE_NAP) { + for (i = KGSL_MAX_CLKS - 1; i > 0; i--) + if (pwr->grp_clks[i]) + clk_prepare(pwr->grp_clks[i]); + + if (pwr->pwrlevels[0].gpu_freq > 0) + clk_set_rate(pwr->grp_clks[0], + pwr->pwrlevels + [pwr->active_pwrlevel]. gpu_freq); - + } /* as last step, enable grp_clk this is to let GPU interrupt to come */ for (i = KGSL_MAX_CLKS - 1; i > 0; i--) @@ -852,9 +873,8 @@ void kgsl_pwrctrl_wake(struct kgsl_device *device) mod_timer(&device->idle_timer, jiffies + device->pwrctrl.interval_timeout); wake_lock(&device->idle_wakelock); - if (device->pwrctrl.restore_slumber == false) - pm_qos_update_request(&device->pm_qos_req_dma, - GPU_SWFI_LATENCY); + pm_qos_update_request(&device->pm_qos_req_dma, + GPU_SWFI_LATENCY); case KGSL_STATE_ACTIVE: break; default: diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c index 1b1ca0ba6b5..fa53648723e 100644 --- a/drivers/gpu/msm/z180.c +++ b/drivers/gpu/msm/z180.c @@ -289,15 +289,22 @@ static int z180_setup_pt(struct kgsl_device *device, return result; } -static inline unsigned int rb_offset(unsigned int index) +static inline unsigned int rb_offset(unsigned int timestamp) { - return index*sizeof(unsigned int)*(Z180_PACKET_SIZE); + return (timestamp % Z180_PACKET_COUNT) + *sizeof(unsigned int)*(Z180_PACKET_SIZE); } -static void addmarker(struct z180_ringbuffer *rb, unsigned int index) +static inline unsigned int rb_gpuaddr(struct z180_device *z180_dev, + unsigned int timestamp) +{ + return z180_dev->ringbuffer.cmdbufdesc.gpuaddr + rb_offset(timestamp); +} + +static void addmarker(struct z180_ringbuffer *rb, unsigned int timestamp) { char *ptr = (char *)(rb->cmdbufdesc.hostptr); - unsigned int *p = (unsigned int *)(ptr + rb_offset(index)); + unsigned int *p = (unsigned int *)(ptr + rb_offset(timestamp)); *p++ = Z180_STREAM_PACKET; *p++ = (Z180_MARKER_CMD | 5); @@ -311,11 +318,11 @@ static void addmarker(struct z180_ringbuffer *rb, unsigned int index) *p++ = ADDR_VGV3_LAST << 24; } -static void addcmd(struct z180_ringbuffer *rb, unsigned int index, +static void addcmd(struct z180_ringbuffer *rb, unsigned int timestamp, unsigned int cmd, unsigned int nextcnt) { char * ptr = (char *)(rb->cmdbufdesc.hostptr); - unsigned int *p = (unsigned int *)(ptr + (rb_offset(index) + unsigned int *p = (unsigned int *)(ptr + (rb_offset(timestamp) + (Z180_MARKER_SIZE * sizeof(unsigned int)))); *p++ = Z180_STREAM_PACKET_CALL; @@ -338,7 +345,7 @@ static void z180_cmdstream_start(struct kgsl_device *device) z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4); z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR, - z180_dev->ringbuffer.cmdbufdesc.gpuaddr); + rb_gpuaddr(z180_dev, z180_dev->current_timestamp)); z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5); @@ -362,7 +369,7 @@ static int room_in_rb(struct z180_device *device) return ts_diff < Z180_PACKET_COUNT; } -static int z180_idle(struct kgsl_device *device, unsigned int timeout) +static int z180_idle(struct kgsl_device *device) { int status = 0; struct z180_device *z180_dev = Z180_DEVICE(device); @@ -370,7 +377,7 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout) if (timestamp_cmp(z180_dev->current_timestamp, z180_dev->timestamp) > 0) status = z180_wait(device, z180_dev->current_timestamp, - timeout); + Z180_IDLE_TIMEOUT); if (status) KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n"); @@ -389,9 +396,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, long result = 0; unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int); 
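
The z180 hunks above replace a separately tracked ring index with slots addressed directly by timestamp: each timestamp maps to a fixed-size packet slot, and the mapping wraps once the packet count is exceeded. The sketch below illustrates that addressing scheme; the PACKET_COUNT and PACKET_SIZE values and the base address are assumed for illustration and do not come from the driver headers.

/* Sketch: timestamp-based ring slot addressing with wraparound. */
#include <stdio.h>

#define PACKET_COUNT	8	/* slots in the ring (assumed) */
#define PACKET_SIZE	16	/* dwords per slot (assumed)   */

static unsigned int rb_offset(unsigned int timestamp)
{
	/* slot index = timestamp modulo the ring size, in bytes */
	return (timestamp % PACKET_COUNT) * sizeof(unsigned int) * PACKET_SIZE;
}

int main(void)
{
	unsigned int base = 0x1000;	/* pretend GPU address of the ring */
	unsigned int ts;

	for (ts = 6; ts <= 10; ts++)	/* crosses the wrap at PACKET_COUNT */
		printf("timestamp %u -> slot offset 0x%03x, gpuaddr 0x%04x\n",
		       ts, rb_offset(ts), base + rb_offset(ts));
	return 0;
}

Deriving the slot from the timestamp keeps the next-command GPU address and the submitted timestamp in lockstep, which is what lets the patch drop the old index and nextindex bookkeeping.
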
unsigned int cnt = 5; - unsigned int nextaddr = 0; - unsigned int index = 0; - unsigned int nextindex; + unsigned int old_timestamp = 0; unsigned int nextcnt = Z180_STREAM_END_CMD | 5; struct kgsl_mem_entry *entry = NULL; unsigned int cmd; @@ -461,26 +466,22 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv, } result = 0; - index = z180_dev->current_timestamp % Z180_PACKET_COUNT; + old_timestamp = z180_dev->current_timestamp; z180_dev->current_timestamp++; - nextindex = z180_dev->current_timestamp % Z180_PACKET_COUNT; *timestamp = z180_dev->current_timestamp; z180_dev->ringbuffer.prevctx = context->id; - addcmd(&z180_dev->ringbuffer, index, cmd + ofs, cnt); + addcmd(&z180_dev->ringbuffer, old_timestamp, cmd + ofs, cnt); kgsl_pwrscale_busy(device); /* Make sure the next ringbuffer entry has a marker */ - addmarker(&z180_dev->ringbuffer, nextindex); - - nextaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr - + rb_offset(nextindex); + addmarker(&z180_dev->ringbuffer, z180_dev->current_timestamp); /* monkey patch the IB so that it jumps back to the ringbuffer */ kgsl_sharedmem_writel(&entry->memdesc, - ((sizedwords + 1) * sizeof(unsigned int)), - nextaddr); + ((sizedwords + 1) * sizeof(unsigned int)), + rb_gpuaddr(z180_dev, z180_dev->current_timestamp)); kgsl_sharedmem_writel(&entry->memdesc, ((sizedwords + 2) * sizeof(unsigned int)), nextcnt); @@ -592,7 +593,7 @@ static int z180_start(struct kgsl_device *device, unsigned int init_ram) static int z180_stop(struct kgsl_device *device) { device->ftbl->irqctrl(device, 0); - z180_idle(device, KGSL_TIMEOUT_DEFAULT); + z180_idle(device); del_timer_sync(&device->idle_timer); @@ -859,7 +860,7 @@ z180_drawctxt_destroy(struct kgsl_device *device, { struct z180_device *z180_dev = Z180_DEVICE(device); - z180_idle(device, KGSL_TIMEOUT_DEFAULT); + z180_idle(device); if (z180_dev->ringbuffer.prevctx == context->id) { z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT; diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h index e5c5ef303c5..2962ccd82e6 100644 --- a/drivers/gpu/msm/z180.h +++ b/drivers/gpu/msm/z180.h @@ -21,6 +21,9 @@ #define Z180_DEFAULT_PWRSCALE_POLICY NULL +/* Wait a maximum of 10 seconds when trying to idle the core */ +#define Z180_IDLE_TIMEOUT (10 * 1000) + struct z180_ringbuffer { unsigned int prevctx; struct kgsl_memdesc cmdbufdesc; diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 58434e804d9..37fe2463f9b 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -26,6 +26,7 @@ #include #include +#include #include struct vga_switcheroo_client { @@ -256,8 +257,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) if (new_client->fb_info) { struct fb_event event; + console_lock(); event.info = new_client->fb_info; fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); + console_unlock(); } ret = vgasr_priv.handler->switchto(new_client->id); diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index 8965ad93d51..b99af346fdf 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c @@ -45,6 +45,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, case 0xff09: ch_map_key_clear(BTN_9); break; case 0xff0a: ch_map_key_clear(BTN_A); break; case 0xff0b: ch_map_key_clear(BTN_B); break; + case 0x00f1: ch_map_key_clear(KEY_WLAN); break; + case 0x00f2: ch_map_key_clear(KEY_BRIGHTNESSDOWN); break; + case 0x00f3: ch_map_key_clear(KEY_BRIGHTNESSUP); break; + case 0x00f4: 
ch_map_key_clear(KEY_DISPLAY_OFF); break; + case 0x00f7: ch_map_key_clear(KEY_CAMERA); break; + case 0x00f8: ch_map_key_clear(KEY_PROG1); break; default: return 0; } @@ -53,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, static const struct hid_device_id ch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { } }; MODULE_DEVICE_TABLE(hid, ch_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6c2a31593a2..1129c526c00 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -361,7 +361,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: parser->global.report_size = item_udata(item); - if (parser->global.report_size > 32) { + if (parser->global.report_size > 96) { dbg_hid("invalid report_size %d\n", parser->global.report_size); return -1; @@ -1372,6 +1372,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, @@ -1382,11 +1383,13 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) }, { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 206f75021d5..08cc68ba9eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -21,6 +21,7 @@ #define USB_VENDOR_ID_3M 0x0596 #define USB_DEVICE_ID_3M1968 0x0500 #define USB_DEVICE_ID_3M2256 0x0502 +#define USB_DEVICE_ID_3M3266 0x0506 #define USB_VENDOR_ID_A4TECH 0x09da #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 @@ -58,6 +59,9 @@ #define 
USB_VENDOR_ID_AIRCABLE 0x16CA #define USB_DEVICE_ID_AIRCABLE1 0x1502 +#define USB_VENDOR_ID_AIREN 0x1a2c +#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002 + #define USB_VENDOR_ID_ALCOR 0x058f #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720 @@ -185,6 +189,7 @@ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 +#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 #define USB_VENDOR_ID_CHUNGHWAT 0x2247 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 @@ -230,11 +235,14 @@ #define USB_VENDOR_ID_DWAV 0x0eef #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1 -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e -#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 #define USB_VENDOR_ID_ELECOM 0x056e #define USB_DEVICE_ID_ELECOM_BM084 0x0061 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 685d8e42587..1308703afdb 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -593,6 +593,9 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_3M, HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M2256) }, + { .driver_data = MT_CLS_3M, + HID_USB_DEVICE(USB_VENDOR_ID_3M, + USB_DEVICE_ID_3M3266) }, /* ActionStar panels */ { .driver_data = MT_CLS_DEFAULT, @@ -629,23 +632,32 @@ static const struct hid_device_id mt_devices[] = { USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, /* eGalax devices (resistive) */ - { .driver_data = MT_CLS_EGALAX, + { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, - { .driver_data = MT_CLS_EGALAX, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) }, + { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) }, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) }, /* eGalax devices (capacitive) */ - { .driver_data = MT_CLS_EGALAX, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) }, + { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, - { .driver_data = MT_CLS_EGALAX, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) }, + { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) }, - { .driver_data = MT_CLS_EGALAX, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) }, + { .driver_data = MT_CLS_EGALAX, HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) }, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, /* Elo TouchSystems IntelliTouch Plus panel */ { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID, diff --git 
a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 3146fdcda27..85c845f7f61 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -52,6 +52,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 92fd1d787ed..947a8416e53 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -482,8 +482,9 @@ config SENSORS_JC42 If you say yes here, you get support for JEDEC JC42.4 compliant temperature sensors, which are used on many DDR3 memory modules for mobile devices and servers. Support will include, but not be limited - to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, - MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. + to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805, + MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002, + STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2. This driver can also be built as a module. If so, the module will be called jc42. diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index 65a35cf5b3c..61ab615400e 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c @@ -1280,14 +1280,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev) pr_info("found Abit uGuru\n"); /* Register sysfs hooks */ - for (i = 0; i < sysfs_attr_i; i++) - if (device_create_file(&pdev->dev, - &data->sysfs_attr[i].dev_attr)) + for (i = 0; i < sysfs_attr_i; i++) { + res = device_create_file(&pdev->dev, + &data->sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; - for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) - if (device_create_file(&pdev->dev, - &abituguru_sysfs_attr[i].dev_attr)) + } + for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { + res = device_create_file(&pdev->dev, + &abituguru_sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; + } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (!IS_ERR(data->hwmon_dev)) diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c index e9beeda4cbe..9a5af38bffd 100644 --- a/drivers/hwmon/ads1015.c +++ b/drivers/hwmon/ads1015.c @@ -284,7 +284,7 @@ static int ads1015_probe(struct i2c_client *client, continue; err = device_create_file(&client->dev, &ads1015_in[k].dev_attr); if (err) - goto exit_free; + goto exit_remove; } data->hwmon_dev = hwmon_device_register(&client->dev); @@ -298,7 +298,6 @@ static int ads1015_probe(struct i2c_client *client, exit_remove: for (k = 0; k < ADS1015_CHANNELS; ++k) device_remove_file(&client->dev, &ads1015_in[k].dev_attr); -exit_free: kfree(data); exit: return err; diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index 52319340e18..a5737a54461 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c @@ -133,6 +133,12 @@ static ssize_t show_voltage(struct device *dev, } } +static ssize_t ads7871_show_name(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); +} + static SENSOR_DEVICE_ATTR(in0_input, 
S_IRUGO, show_voltage, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); @@ -142,6 +148,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); +static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL); + static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, @@ -151,6 +159,7 @@ static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, + &dev_attr_name.attr, NULL }; diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 4c0743660e9..d99aa848477 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) int i; if (send_command(cmd) || send_argument(key)) { - pr_warn("%s: read arg fail\n", key); + pr_warn("%.4s: read arg fail\n", key); return -EIO; } @@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) for (i = 0; i < len; i++) { if (__wait_status(0x05)) { - pr_warn("%s: read data fail\n", key); + pr_warn("%.4s: read data fail\n", key); return -EIO; } buffer[i] = inb(APPLESMC_DATA_PORT); diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 00e98517f94..83d2fbd670d 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } + }, { + /* Old interface reads the same sensor for fan0 and fan1 */ + .ident = "Asus M5A78L", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M5A78L") + } }, { } }; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 6163cfa95c3..87fd034dabd 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -42,21 +42,20 @@ #define DRVNAME "coretemp" #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ -#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ +#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ #define MAX_ATTRS 5 /* Maximum no of per-core attrs */ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) -#ifdef CONFIG_SMP #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id +#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) + +#ifdef CONFIG_SMP #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) #else -#define TO_PHYS_ID(cpu) (cpu) -#define TO_CORE_ID(cpu) (cpu) #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) /* * Per-Core Temperature Data @@ -709,7 +708,7 @@ static void __cpuinit get_core_online(unsigned int cpu) * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. 
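The applesmc change above switches "%s" to "%.4s": SMC keys are four-byte identifiers that are not guaranteed to be NUL-terminated, so the precision bounds how much printf may read. A small, self-contained illustration (the key value here is made up):

	#include <stdio.h>

	int main(void)
	{
		const char key[4] = { 'T', 'C', '0', 'P' };	/* no terminating NUL */

		/* "%s" would read past the end of key[]; the ".4" precision
		 * stops the read after four bytes. */
		printf("%.4s: read arg fail\n", key);
		return 0;
	}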
*/ - if (!cpu_has(c, X86_FEATURE_DTS)) + if (!cpu_has(c, X86_FEATURE_DTHERM)) return; if (!pdev) { @@ -752,6 +751,10 @@ static void __cpuinit put_core_offline(unsigned int cpu) indx = TO_ATTR_NO(cpu); + /* The core id is too big, just return */ + if (indx > MAX_CORE_DATA - 1) + return; + if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) coretemp_remove_core(pdata, &pdev->dev, indx); diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c index 92f949767ec..6dbfd3e516e 100644 --- a/drivers/hwmon/f71805f.c +++ b/drivers/hwmon/f71805f.c @@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg) static inline u8 temp_to_reg(long val) { - if (val < 0) - val = 0; - else if (val > 1000 * 0xff) - val = 0xff; - return ((val + 500) / 1000); + if (val <= 0) + return 0; + if (val >= 1000 * 0xff) + return 0xff; + return (val + 500) / 1000; } /* diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 95cbfb3a707..040a820acd1 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -159,7 +159,7 @@ static inline void f75375_write8(struct i2c_client *client, u8 reg, static inline void f75375_write16(struct i2c_client *client, u8 reg, u16 value) { - int err = i2c_smbus_write_byte_data(client, reg, (value << 8)); + int err = i2c_smbus_write_byte_data(client, reg, (value >> 8)); if (err) return; i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF)); @@ -304,20 +304,21 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val) case 0: /* Full speed */ fanmode |= (3 << FAN_CTRL_MODE(nr)); data->pwm[nr] = 255; - f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr), - data->pwm[nr]); break; case 1: /* PWM */ fanmode |= (3 << FAN_CTRL_MODE(nr)); break; case 2: /* AUTOMATIC*/ - fanmode |= (2 << FAN_CTRL_MODE(nr)); + fanmode |= (1 << FAN_CTRL_MODE(nr)); break; case 3: /* fan speed */ break; } f75375_write8(client, F75375_REG_FAN_TIMER, fanmode); data->pwm_enable[nr] = val; + if (val == 0) + f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr), + data->pwm[nr]); return 0; } diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index 523f8fb9e7d..770e95951a5 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); MODULE_AUTHOR("Andreas Herrmann "); MODULE_LICENSE("GPL"); +/* Family 16h Northbridge's function 4 PCI ID */ +#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 + /* D18F3 */ #define REG_NORTHBRIDGE_CAP 0xe8 @@ -60,15 +63,15 @@ static ssize_t show_power(struct device *dev, pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), REG_TDP_RUNNING_AVERAGE, &val); running_avg_capture = (val >> 4) & 0x3fffff; - running_avg_capture = sign_extend32(running_avg_capture, 22); - running_avg_range = val & 0xf; + running_avg_capture = sign_extend32(running_avg_capture, 21); + running_avg_range = (val & 0xf) + 1; pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), REG_TDP_LIMIT3, &val); tdp_limit = val >> 16; - curr_pwr_watts = tdp_limit + data->base_tdp - - (s32)(running_avg_capture >> (running_avg_range + 1)); + curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range; + curr_pwr_watts -= running_avg_capture; curr_pwr_watts *= data->tdp_to_watts; /* @@ -78,7 +81,7 @@ static ssize_t show_power(struct device *dev, * scaling factor 1/(2^16). 
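The f75375s fix above swaps (value << 8) for (value >> 8) when writing the high byte of a 16-bit register pair. A quick sketch of the intended split, with an arbitrary example value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t value = 0x12AB;
		uint8_t hi = value >> 8;	/* 0x12 - first register write */
		uint8_t lo = value & 0xFF;	/* 0xAB - second register write */

		/* (value << 8) truncated to a byte is always 0, which is why
		 * the old code never programmed the high byte correctly. */
		printf("high byte 0x%02X, low byte 0x%02X\n", hi, lo);
		return 0;
	}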
For conversion we use * (10^6)/(2^16) = 15625/(2^10) */ - curr_pwr_watts = (curr_pwr_watts * 15625) >> 10; + curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range); return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts); } static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL); @@ -122,6 +125,51 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) return true; } +/* + * Newer BKDG versions have an updated recommendation on how to properly + * initialize the running average range (was: 0xE, now: 0x9). This avoids + * counter saturations resulting in bogus power readings. + * We correct this value ourselves to cope with older BIOSes. + */ +static const struct pci_device_id affected_device[] = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { 0 } +}; + +static void tweak_runavg_range(struct pci_dev *pdev) +{ + u32 val; + + /* + * let this quirk apply only to the current version of the + * northbridge, since future versions may change the behavior + */ + if (!pci_match_id(affected_device, pdev)) + return; + + pci_bus_read_config_dword(pdev->bus, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), + REG_TDP_RUNNING_AVERAGE, &val); + if ((val & 0xf) != 0xe) + return; + + val &= ~0xf; + val |= 0x9; + pci_bus_write_config_dword(pdev->bus, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), + REG_TDP_RUNNING_AVERAGE, val); +} + +#ifdef CONFIG_PM +static int fam15h_power_resume(struct pci_dev *pdev) +{ + tweak_runavg_range(pdev); + return 0; +} +#else +#define fam15h_power_resume NULL +#endif + static void __devinit fam15h_power_init_data(struct pci_dev *f4, struct fam15h_power_data *data) { @@ -155,6 +203,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev, struct device *dev; int err; + /* + * though we ignore every other northbridge, we still have to + * do the tweaking on _each_ node in MCM processors as the counters + * are working hand-in-hand + */ + tweak_runavg_range(pdev); + if (!fam15h_power_is_internal_node0(pdev)) { err = -ENODEV; goto exit; @@ -204,6 +259,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev) static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, {} }; MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); @@ -213,6 +269,7 @@ static struct pci_driver fam15h_power_driver = { .id_table = fam15h_power_id_table, .probe = fam15h_power_probe, .remove = __devexit_p(fam15h_power_remove), + .resume = fam15h_power_resume, }; static int __init fam15h_power_init(void) diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 5f524775043..b358c87c3bc 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -2057,7 +2057,7 @@ static void __devinit it87_init_device(struct platform_device *pdev) /* Start monitoring */ it87_write_value(data, IT87_REG_CONFIG, - (it87_read_value(data, IT87_REG_CONFIG) & 0x36) + (it87_read_value(data, IT87_REG_CONFIG) & 0x3e) | (update_vbat ? 
0x41 : 0x01)); } diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 02cebb74e20..ed4392406e9 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -64,6 +64,7 @@ static const unsigned short normal_i2c[] = { /* Manufacturer IDs */ #define ADT_MANID 0x11d4 /* Analog Devices */ +#define ATMEL_MANID 0x001f /* Atmel */ #define MAX_MANID 0x004d /* Maxim */ #define IDT_MANID 0x00b3 /* IDT */ #define MCP_MANID 0x0054 /* Microchip */ @@ -77,15 +78,25 @@ static const unsigned short normal_i2c[] = { #define ADT7408_DEVID 0x0801 #define ADT7408_DEVID_MASK 0xffff +/* Atmel */ +#define AT30TS00_DEVID 0x8201 +#define AT30TS00_DEVID_MASK 0xffff + /* IDT */ #define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */ #define TS3000B3_DEVID_MASK 0xffff +#define TS3000GB2_DEVID 0x2912 /* Also matches TSE2002GB2 */ +#define TS3000GB2_DEVID_MASK 0xffff + /* Maxim */ #define MAX6604_DEVID 0x3e00 #define MAX6604_DEVID_MASK 0xffff /* Microchip */ +#define MCP9804_DEVID 0x0200 +#define MCP9804_DEVID_MASK 0xfffc + #define MCP98242_DEVID 0x2000 #define MCP98242_DEVID_MASK 0xfffc @@ -113,6 +124,12 @@ static const unsigned short normal_i2c[] = { #define STTS424E_DEVID 0x0000 #define STTS424E_DEVID_MASK 0xfffe +#define STTS2002_DEVID 0x0300 +#define STTS2002_DEVID_MASK 0xffff + +#define STTS3000_DEVID 0x0200 +#define STTS3000_DEVID_MASK 0xffff + static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 }; struct jc42_chips { @@ -123,8 +140,11 @@ struct jc42_chips { static struct jc42_chips jc42_chips[] = { { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK }, + { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK }, { IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK }, + { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK }, { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK }, + { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK }, { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK }, { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK }, { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK }, @@ -133,6 +153,8 @@ static struct jc42_chips jc42_chips[] = { { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK }, { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK }, { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK }, + { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK }, + { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK }, }; /* Each client has this additional data */ @@ -161,10 +183,12 @@ static struct jc42_data *jc42_update_device(struct device *dev); static const struct i2c_device_id jc42_id[] = { { "adt7408", 0 }, + { "at30ts00", 0 }, { "cat94ts02", 0 }, { "cat6095", 0 }, { "jc42", 0 }, { "max6604", 0 }, + { "mcp9804", 0 }, { "mcp9805", 0 }, { "mcp98242", 0 }, { "mcp98243", 0 }, @@ -173,8 +197,10 @@ static const struct i2c_device_id jc42_id[] = { { "se97b", 0 }, { "se98", 0 }, { "stts424", 0 }, - { "tse2002b3", 0 }, - { "ts3000b3", 0 }, + { "stts2002", 0 }, + { "stts3000", 0 }, + { "tse2002", 0 }, + { "ts3000", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, jc42_id); diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c index 58eded27f38..c9910f72f1c 100644 --- a/drivers/hwmon/lineage-pem.c +++ b/drivers/hwmon/lineage-pem.c @@ -421,6 +421,7 @@ static struct attribute *pem_input_attributes[] = { &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, + NULL }; static const struct attribute_group pem_input_group = { @@ -431,6 +432,7 @@ static struct attribute *pem_fan_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, 
&sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, + NULL }; static const struct attribute_group pem_fan_group = { diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c index f20d9978ee7..8c3df047e56 100644 --- a/drivers/hwmon/max6639.c +++ b/drivers/hwmon/max6639.c @@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; -#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \ - (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val))) +#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \ + 0 : (rpm_ranges[rpm_range] * 30) / (val)) #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255) /* @@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev, return PTR_ERR(data); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index], - data->ppr, data->rpm_range)); + data->rpm_range)); } static ssize_t show_alarm(struct device *dev, @@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client) struct max6639_data *data = i2c_get_clientdata(client); struct max6639_platform_data *max6639_info = client->dev.platform_data; - int i = 0; + int i; int rpm_range = 1; /* default: 4000 RPM */ - int err = 0; + int err; /* Reset chip to default values, see below for GCONFIG setup */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, @@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client) else data->ppr = 2; data->ppr -= 1; - err = i2c_smbus_write_byte_data(client, - MAX6639_REG_FAN_PPR(i), - data->ppr << 5); - if (err) - goto exit; if (max6639_info) rpm_range = rpm_range_to_reg(max6639_info->rpm_range); @@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client) for (i = 0; i < 2; i++) { + /* Set Fan pulse per revolution */ + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_PPR(i), + data->ppr << 6); + if (err) + goto exit; + /* Fans config PWM, RPM */ err = i2c_smbus_write_byte_data(client, MAX6639_REG_FAN_CONFIG1(i), diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c index 8e31a8e2c74..ffa54dd7dbd 100644 --- a/drivers/hwmon/pmbus_core.c +++ b/drivers/hwmon/pmbus_core.c @@ -50,7 +50,8 @@ lcrit_alarm, crit_alarm */ #define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm, crit_alarm */ -#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */ +#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm + */ #define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */ #define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm, lcrit_alarm, crit_alarm */ diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index cf4330b352e..8aa6e1215cf 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb, static int __devinit sht15_probe(struct platform_device *pdev) { - int ret = 0; + int ret; struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL); u8 status = 0; @@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev) init_waitqueue_head(&data->wait_queue); if (pdev->dev.platform_data == NULL) { + ret = -EINVAL; dev_err(&pdev->dev, "no platform data supplied\n"); goto err_free_data; } @@ -925,7 +926,13 @@ static int __devinit sht15_probe(struct platform_device *pdev) if (voltage) data->supply_uV = voltage; - regulator_enable(data->reg); + ret = regulator_enable(data->reg); + if (ret 
!= 0) { + dev_err(&pdev->dev, + "failed to enable regulator: %d\n", ret); + goto err_free_data; + } + /* * Setup a notifier block to update this if another device * causes the voltage to change diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c index 57240740b16..b6adfac6ca9 100644 --- a/drivers/hwmon/twl4030-madc-hwmon.c +++ b/drivers/hwmon/twl4030-madc-hwmon.c @@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct twl4030_madc_request req; + struct twl4030_madc_request req = { + .channels = 1 << attr->index, + .method = TWL4030_MADC_SW2, + .type = TWL4030_MADC_WAIT, + }; long val; - req.channels = (1 << attr->index); - req.method = TWL4030_MADC_SW2; - req.func_cb = NULL; val = twl4030_madc_conversion(&req); if (val < 0) return val; diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 4b2fc50c84f..1198a6e5e8c 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -1295,6 +1295,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, { struct w83627ehf_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + struct w83627ehf_sio_data *sio_data = dev->platform_data; int nr = sensor_attr->index; unsigned long val; int err; @@ -1306,6 +1307,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, if (val > 1) return -EINVAL; + + /* On NCT67766F, DC mode is only supported for pwm1 */ + if (sio_data->kind == nct6776 && nr && val != 1) + return -EINVAL; + mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); data->pwm_mode[nr] = val; @@ -1577,7 +1583,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \ + w83627ehf_write_value(data, data->REG_##REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } \ @@ -1817,7 +1823,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) goto exit; } - data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL); + data = devm_kzalloc(&pdev->dev, sizeof(struct w83627ehf_data), + GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit_release; @@ -1827,6 +1834,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) mutex_init(&data->lock); mutex_init(&data->update_lock); data->name = w83627ehf_device_names[sio_data->kind]; + data->bank = 0xff; /* Force initial bank selection */ platform_set_drvdata(pdev, data); /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ @@ -2098,9 +2106,29 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) fan4min = 0; fan5pin = 0; } else if (sio_data->kind == nct6776) { - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); + bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; + u8 regval; + + superio_select(sio_data->sioreg, W83627EHF_LD_HWM); + regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); + + if (regval & 0x80) + fan3pin = gpok; + else + fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); + + if (regval & 0x40) + fan4pin = gpok; + else + fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) + & 0x01); + + if 
(regval & 0x20) + fan5pin = gpok; + else + fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) + & 0x02); + fan4min = fan4pin; } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; @@ -2293,9 +2321,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) exit_remove: w83627ehf_device_remove_files(dev); - kfree(data); - platform_set_drvdata(pdev, NULL); exit_release: + platform_set_drvdata(pdev, NULL); release_region(res->start, IOREGION_LENGTH); exit: return err; @@ -2309,7 +2336,6 @@ static int __devexit w83627ehf_remove(struct platform_device *pdev) w83627ehf_device_remove_files(&pdev->dev); release_region(data->addr, IOREGION_LENGTH); platform_set_drvdata(pdev, NULL); - kfree(data); return 0; } diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index eca3bcc4599..f2ce8c36816 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -103,8 +103,14 @@ static int sclhi(struct i2c_algo_bit_data *adap) * chips may hold it low ("clock stretching") while they * are processing data internally. */ - if (time_after(jiffies, start + adap->timeout)) + if (time_after(jiffies, start + adap->timeout)) { + /* Test one last time, as we may have been preempted + * between last check and timeout test. + */ + if (getscl(adap)) + break; return -ETIMEDOUT; + } cond_resched(); } #ifdef DEBUG diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index dd364171f9c..cd7ac5c6783 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -140,7 +140,7 @@ static unsigned short ali1535_smba; defined to make the transition easier. */ static int __devinit ali1535_setup(struct pci_dev *dev) { - int retval = -ENODEV; + int retval; unsigned char temp; /* Check the following things: @@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev) if (ali1535_smba == 0) { dev_warn(&dev->dev, "ALI1535_smb region uninitialized - upgrade BIOS?\n"); + retval = -ENODEV; goto exit; } @@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev) ali1535_driver.name)) { dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n", ali1535_smba); + retval = -EBUSY; goto exit; } @@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev) pci_read_config_byte(dev, SMBCFG, &temp); if ((temp & ALI1535_SMBIO_EN) == 0) { dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n"); + retval = -ENODEV; goto exit_free; } @@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev) pci_read_config_byte(dev, SMBHSTCFG, &temp); if ((temp & 1) == 0) { dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n"); + retval = -ENODEV; goto exit_free; } @@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev) dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp); dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba); - retval = 0; -exit: - return retval; + return 0; exit_free: release_region(ali1535_smba, ALI1535_SMB_IOSIZE); +exit: return retval; } diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index a76d85fa3ad..79b4bcb3b85 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev) dev->clk = NULL; davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0); - free_irq(IRQ_I2C, dev); + free_irq(dev->irq, dev); iounmap(dev->base); kfree(dev); diff --git 
a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 8abfa4a03ce..656b028d981 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -242,7 +242,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap) if (pch_clk > PCH_MAX_CLK) pch_clk = 62500; - pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8; + pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8); /* Set transfer speed in I2CBC */ iowrite32(pch_i2cbc, p + PCH_I2CBC); diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 7e78f7c8785..3d471d56bf1 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -72,6 +72,7 @@ #define MXS_I2C_QUEUESTAT (0x70) #define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000 +#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F #define MXS_I2C_QUEUECMD (0x80) @@ -219,14 +220,14 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int ret; int flags; - init_completion(&i2c->cmd_complete); - dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); if (msg->len == 0) return -EINVAL; + init_completion(&i2c->cmd_complete); + flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0; if (msg->flags & I2C_M_RD) @@ -286,6 +287,7 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) { struct mxs_i2c_dev *i2c = dev_id; u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK; + bool is_last_cmd; if (!stat) return IRQ_NONE; @@ -300,9 +302,14 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) else i2c->cmd_err = 0; - complete(&i2c->cmd_complete); + is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) & + MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0; + + if (is_last_cmd || i2c->cmd_err) + complete(&i2c->cmd_complete); writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR); + return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index ff1e127dfea..4853b52a40a 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar, error = acpi_check_region(smbus->base, smbus->size, nforce2_driver.name); if (error) - return -1; + return error; if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) { dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n", diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 58a58c7eaa1..137e1a3bfad 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -235,7 +235,7 @@ const static u8 omap4_reg_map[] = { [OMAP_I2C_BUF_REG] = 0x94, [OMAP_I2C_CNT_REG] = 0x98, [OMAP_I2C_DATA_REG] = 0x9c, - [OMAP_I2C_SYSC_REG] = 0x20, + [OMAP_I2C_SYSC_REG] = 0x10, [OMAP_I2C_CON_REG] = 0xa4, [OMAP_I2C_OA_REG] = 0xa8, [OMAP_I2C_SA_REG] = 0xac, diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 04be9f82e14..eb8ad538c79 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev, { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); - /* FIXME: shouldn't this be clk_disable? 
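The i2c-eg20t change above is a pure operator-precedence fix: division and multiplication associate left to right, so "a / b * 8" means "(a / b) * 8" rather than "a / (b * 8)". A standalone check with illustrative clock numbers (not the driver's actual defaults):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pch_clk = 50000;		/* kHz, example value */
		unsigned int pch_i2c_speed = 100;	/* kHz, example value */

		unsigned int wrong = (pch_clk + pch_i2c_speed * 4) / pch_i2c_speed * 8;
		unsigned int right = (pch_clk + pch_i2c_speed * 4) / (pch_i2c_speed * 8);

		/* prints 4032 vs 63 - the results differ by a factor of 64 (8 * 8) */
		printf("old expression: %u, fixed expression: %u\n", wrong, right);
		return 0;
	}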
*/ - clk_enable(alg_data->clk); + clk_disable(alg_data->clk); return 0; } diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c index 437586611d4..6d60284cc04 100644 --- a/drivers/i2c/busses/i2c-sis5595.c +++ b/drivers/i2c/busses/i2c-sis5595.c @@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev) u16 a; u8 val; int *i; - int retval = -ENODEV; + int retval; /* Look for imposters */ for (i = blacklist; *i != 0; i++) { @@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev) error: release_region(sis5595_base + SMB_INDEX, 2); - return retval; + return -ENODEV; } static int sis5595_transaction(struct i2c_adapter *adap) diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index e6f539e26f6..b617fd068ac 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev) { unsigned char b; struct pci_dev *dummy = NULL; - int retval = -ENODEV, i; + int retval, i; /* check for supported SiS devices */ for (i=0; supported[i] > 0 ; i++) { @@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev) */ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) { dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n"); + retval = -ENODEV; goto exit; } /* if ACPI already enabled , do nothing */ if (!(b & 0x80) && pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) { dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n"); + retval = -ENODEV; goto exit; } /* Determine the ACPI base address */ if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) { dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n"); + retval = -ENODEV; goto exit; } @@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev) sis630_driver.name)) { dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already " "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA); + retval = -EBUSY; goto exit; } diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 0b012f1f8ac..58261d4725b 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned char temp; - int error = -ENODEV; + int error; /* Determine the address of the SMBus areas */ if (force_addr) { @@ -390,6 +390,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev, dev_err(&pdev->dev, "SMBUS: Error: Host SMBus " "controller not enabled! - upgrade BIOS or " "use force=1\n"); + error = -ENODEV; goto release_region; } } @@ -422,9 +423,11 @@ static int __devinit vt596_probe(struct pci_dev *pdev, "SMBus Via Pro adapter at %04x", vt596_smba); vt596_pdev = pci_dev_get(pdev); - if (i2c_add_adapter(&vt596_adapter)) { + error = i2c_add_adapter(&vt596_adapter); + if (error) { pci_dev_put(vt596_pdev); vt596_pdev = NULL; + goto release_region; } /* Always return failure here. 
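Several of the bus-driver hunks in this patch (ali1535, sis630, vt596) converge on the same error-path idiom: assign the specific errno immediately before each goto instead of relying on a catch-all initial value, so the caller learns why setup failed. A minimal sketch of the pattern, using hypothetical probe_hw()/claim_region() helpers:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool probe_hw(void)     { return true; }		/* stand-in checks */
	static bool claim_region(void) { return false; }

	static int setup(void)
	{
		int retval;

		if (!probe_hw()) {
			retval = -ENODEV;	/* device absent */
			goto exit;
		}
		if (!claim_region()) {
			retval = -EBUSY;	/* I/O region already in use */
			goto exit;
		}
		return 0;
	exit:
		return retval;
	}

	int main(void)
	{
		printf("setup() = %d\n", setup());	/* -EBUSY with these stubs */
		return 0;
	}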
This is to allow other drivers to bind diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 4bb68f35caf..64e7065e8b8 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -311,10 +311,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c) /* last message in transfer -> STOP */ data |= XIIC_TX_DYN_STOP_MASK; dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__); - - xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); - } else - xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data); + } + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); } } diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c index d267b7affad..a22ca846701 100644 --- a/drivers/ide/ide-floppy_ioctl.c +++ b/drivers/ide/ide-floppy_ioctl.c @@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev, * and CDROM_SEND_PACKET (legacy) ioctls */ if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND) - err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk, - mode, cmd, argp); + err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); if (err == -ENOTTY) err = generic_ide_ioctl(drive, bdev, cmd, arg); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index a46dddf6107..026f9aa789e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -321,7 +321,8 @@ static int intel_idle_probe(void) cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || + !mwait_substates) return -ENODEV; pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); @@ -367,7 +368,7 @@ static int intel_idle_probe(void) if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; else { - smp_call_function(__setup_broadcast_timer, (void *)true, 1); + on_each_cpu(__setup_broadcast_timer, (void *)true, 1); register_cpu_notifier(&setup_broadcast_notifier); } @@ -459,7 +460,7 @@ static int intel_idle_cpuidle_devices_init(void) } } if (auto_demotion_disable_flags) - smp_call_function(auto_demotion_disable, NULL, 1); + on_each_cpu(auto_demotion_disable, NULL, 1); return 0; } @@ -499,7 +500,7 @@ static void __exit intel_idle_exit(void) cpuidle_unregister_driver(&intel_idle_driver); if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { - smp_call_function(__setup_broadcast_timer, (void *)false, 1); + on_each_cpu(__setup_broadcast_timer, (void *)false, 1); unregister_cpu_notifier(&setup_broadcast_notifier); } diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 8e21d457b89..f2a84c6f854 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -215,7 +215,9 @@ static int addr4_resolve(struct sockaddr_in *src_in, neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev); if (!neigh || !(neigh->nud_state & NUD_VALID)) { - neigh_event_send(rt->dst.neighbour, NULL); + rcu_read_lock(); + neigh_event_send(dst_get_neighbour(&rt->dst), NULL); + rcu_read_unlock(); ret = -ENODATA; if (neigh) goto release; @@ -273,14 +275,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, goto put; } - neigh = dst->neighbour; + rcu_read_lock(); + neigh = dst_get_neighbour(dst); if (!neigh || !(neigh->nud_state & NUD_VALID)) { - neigh_event_send(dst->neighbour, NULL); + if (neigh) + neigh_event_send(neigh, NULL); ret = -ENODATA; - goto put; + } else { + ret = rdma_copy_addr(addr, dst->dev, 
neigh->ha); } - - ret = rdma_copy_addr(addr, dst->dev, neigh->ha); + rcu_read_unlock(); put: dst_release(dst); return ret; diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index 4a5abaf0a25..9227f4acd79 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c @@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; return netlink_dump_start(nls, skb, nlh, client->cb_table[op].dump, - NULL); + NULL, 0); } } diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 2332dc22aa0..e55ce7a428b 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1328,6 +1328,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) struct iwch_ep *child_ep, *parent_ep = ctx; struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int hwtid = GET_TID(req); + struct neighbour *neigh; struct dst_entry *dst; struct l2t_entry *l2t; struct rtable *rt; @@ -1364,7 +1365,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto reject; } dst = &rt->dst; - l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev); + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + l2t = t3_l2t_get(tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1874,10 +1878,11 @@ static int is_loopback_dst(struct iw_cm_id *cm_id) int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { - int err = 0; struct iwch_dev *h = to_iwch_dev(cm_id->device); + struct neighbour *neigh; struct iwch_ep *ep; struct rtable *rt; + int err = 0; if (is_loopback_dst(cm_id)) { err = -ENOSYS; @@ -1933,9 +1938,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; + rcu_read_lock(); + neigh = dst_get_neighbour(ep->dst); + /* get a l2t entry */ - ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, - ep->dst->neighbour->dev); + ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev); + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 31fb44085c9..267005d0e66 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1325,6 +1325,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); + struct neighbour *neigh; struct dst_entry *dst; struct l2t_entry *l2t; struct rtable *rt; @@ -1357,11 +1358,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } dst = &rt->dst; - if (dst->neighbour->dev->flags & IFF_LOOPBACK) { + rcu_read_lock(); + neigh = dst_get_neighbour(dst); + if (neigh->dev->flags & IFF_LOOPBACK) { pdev = ip_dev_find(&init_net, peer_ip); BUG_ON(!pdev); - l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, - pdev, 0); + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0); mtu = pdev->mtu; tx_chan = cxgb4_port_chan(pdev); smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; @@ -1372,18 +1374,18 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; dev_put(pdev); } else { - l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, 
dst->neighbour, - dst->neighbour->dev, 0); + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0); mtu = dst_mtu(dst); - tx_chan = cxgb4_port_chan(dst->neighbour->dev); - smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1; + tx_chan = cxgb4_port_chan(neigh->dev); + smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; - txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; - ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev); + txq_idx = cxgb4_port_idx(neigh->dev) * step; + ctrlq_idx = cxgb4_port_idx(neigh->dev); step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(dst->neighbour->dev) * step]; + cxgb4_port_idx(neigh->dev) * step]; } + rcu_read_unlock(); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1847,6 +1849,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct c4iw_ep *ep; struct rtable *rt; struct net_device *pdev; + struct neighbour *neigh; int step; if ((conn_param->ord > c4iw_max_read_depth) || @@ -1908,14 +1911,16 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } ep->dst = &rt->dst; + rcu_read_lock(); + neigh = dst_get_neighbour(ep->dst); + /* get a l2t entry */ - if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { + if (neigh->dev->flags & IFF_LOOPBACK) { PDBG("%s LOOPBACK\n", __func__); pdev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr); ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - ep->dst->neighbour, - pdev, 0); + neigh, pdev, 0); ep->mtu = pdev->mtu; ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; @@ -1930,21 +1935,20 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) dev_put(pdev); } else { ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, - ep->dst->neighbour, - ep->dst->neighbour->dev, 0); + neigh, neigh->dev, 0); ep->mtu = dst_mtu(ep->dst); - ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); - ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) & - 0x7F) << 1; + ep->tx_chan = cxgb4_port_chan(neigh->dev); + ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1; step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan; - ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; - ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev); + ep->txq_idx = cxgb4_port_idx(neigh->dev) * step; + ep->ctrlq_idx = cxgb4_port_idx(neigh->dev); step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ - cxgb4_port_idx(ep->dst->neighbour->dev) * step]; + cxgb4_port_idx(neigh->dev) * step]; } + rcu_read_unlock(); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; @@ -2312,6 +2316,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); + if (!ep) { + printk(KERN_WARNING MOD + "Abort on non-existent endpoint, tid %d\n", tid); + kfree_skb(skb); + return 0; + } if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 57ffa50f509..44fc3104e91 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -255,12 +255,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return 
IB_MAD_RESULT_SUCCESS; /* - * Don't process SMInfo queries or vendor-specific - * MADs -- the SMA can't handle them. + * Don't process SMInfo queries -- the SMA can't handle them. */ - if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || - ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == - IB_SMP_ATTR_VENDOR_MASK)) + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO) return IB_MAD_RESULT_SUCCESS; } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 || diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 2001f20a436..23c04ff6519 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1301,7 +1301,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, int is_eth; int is_vlan = 0; int is_grh; - u16 vlan; + u16 vlan = 0; send_size = 0; for (i = 0; i < wr->num_sge; ++i) diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 6fe79876009..6e3027387e2 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -511,6 +511,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *); int nes_destroy_cqp(struct nes_device *); int nes_nic_cm_xmit(struct sk_buff *, struct net_device *); void nes_recheck_link_status(struct work_struct *work); +void nes_terminate_timeout(unsigned long context); /* nes_nic.c */ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *); diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index e74cdf9ef47..a1f74f6381b 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1150,9 +1150,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi neigh_release(neigh); } - if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) - neigh_event_send(rt->dst.neighbour, NULL); - + if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) { + rcu_read_lock(); + neigh_event_send(dst_get_neighbour(&rt->dst), NULL); + rcu_read_unlock(); + } ip_rt_put(rt); return rc; } diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 96fa9a4cafd..ba4814a21ab 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, static void process_critical_error(struct nes_device *nesdev); static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); -static void nes_terminate_timeout(unsigned long context); static void nes_terminate_start_timer(struct nes_qp *nesqp); #ifdef CONFIG_INFINIBAND_NES_DEBUG @@ -3496,7 +3495,7 @@ static void nes_terminate_received(struct nes_device *nesdev, } /* Timeout routine in case terminate fails to complete */ -static void nes_terminate_timeout(unsigned long context) +void nes_terminate_timeout(unsigned long context) { struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context; @@ -3506,11 +3505,7 @@ static void nes_terminate_timeout(unsigned long context) /* Set a timer in case hw cannot complete the terminate sequence */ static void nes_terminate_start_timer(struct nes_qp *nesqp) { - init_timer(&nesqp->terminate_timer); - nesqp->terminate_timer.function = nes_terminate_timeout; - nesqp->terminate_timer.expires = jiffies + HZ; - nesqp->terminate_timer.data = (unsigned long)nesqp; - 
add_timer(&nesqp->terminate_timer); + mod_timer(&nesqp->terminate_timer, (jiffies + HZ)); } /** diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 95ca93ceeda..59db49fbec8 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1414,6 +1414,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, } nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR); + init_timer(&nesqp->terminate_timer); + nesqp->terminate_timer.function = nes_terminate_timeout; + nesqp->terminate_timer.data = (unsigned long)nesqp; /* update the QP table */ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; @@ -1423,7 +1426,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, return &nesqp->ibqp; } - /** * nes_clean_cq */ @@ -2568,6 +2570,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return ibmr; case IWNES_MEMREG_TYPE_QP: case IWNES_MEMREG_TYPE_CQ: + if (!region->length) { + nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n"); + ib_umem_release(region); + return ERR_PTR(-EINVAL); + } nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL); if (!nespbl) { nes_debug(NES_DBG_MR, "Unable to allocate PBL\n"); diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d8ca0a0b970..65df26ce538 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd) static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, u32 updegr, u32 egrhd, u32 npkts) { - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index c765a2eb04c..759bb63bb3b 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -2704,9 +2704,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, u32 updegr, u32 egrhd, u32 npkts) { - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8ec5237031a..49e4a589479 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -4060,10 +4060,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, */ if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) adjust_rcv_timeout(rcd, npkts); - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 7b6985a2e65..936804efb77 
100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -44,6 +44,7 @@ #include #include +#include #include @@ -117,8 +118,9 @@ struct ipoib_header { u16 reserved; }; -struct ipoib_pseudoheader { - u8 hwaddr[INFINIBAND_ALEN]; +struct ipoib_cb { + struct qdisc_skb_cb qdisc_cb; + u8 hwaddr[INFINIBAND_ALEN]; }; /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 39913a065f9..073acdf9c19 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -753,9 +753,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", tx->qp->qp_num); - if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) - ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); + rc = ib_req_notify_cq(priv->send_cq, + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); + if (rc < 0) + ipoib_warn(priv, "request notify on send CQ failed\n"); + else if (rc) + ipoib_send_comp_handler(priv->send_cq, dev); } } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 86addca9ddf..6ea960015e0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev) netif_stop_queue(dev); - ipoib_ib_dev_down(dev, 0); + ipoib_ib_dev_down(dev, 1); ipoib_ib_dev_stop(dev, 0); if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { @@ -555,14 +555,17 @@ static int path_rec_start(struct net_device *dev, return 0; } +/* called with rcu_read_lock */ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; struct ipoib_neigh *neigh; + struct neighbour *n; unsigned long flags; - neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev); + n = dst_get_neighbour(skb_dst(skb)); + neigh = ipoib_neigh_alloc(n, skb->dev); if (!neigh) { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); @@ -571,9 +574,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4); + path = __path_find(dev, n->ha + 4); if (!path) { - path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4); + path = path_rec_create(dev, n->ha + 4); if (!path) goto err_path; @@ -607,7 +610,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) } } else { spin_unlock_irqrestore(&priv->lock, flags); - ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); + ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha)); return; } } else { @@ -634,24 +637,28 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); } +/* called with rcu_read_lock */ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(skb->dev); + struct dst_entry *dst = skb_dst(skb); + struct neighbour *n; /* Look up path record for unicasts */ - if (skb_dst(skb)->neighbour->ha[4] != 0xff) { + n = dst_get_neighbour(dst); + if (n->ha[4] != 0xff) { neigh_add_path(skb, dev); return; } /* Add in the P_Key for multicasts */ - skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 
8) & 0xff; - skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff; - ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb); + n->ha[8] = (priv->pkey >> 8) & 0xff; + n->ha[9] = priv->pkey & 0xff; + ipoib_mcast_send(dev, n->ha + 4, skb); } static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, - struct ipoib_pseudoheader *phdr) + struct ipoib_cb *cb) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_path *path; @@ -659,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, phdr->hwaddr + 4); + path = __path_find(dev, cb->hwaddr + 4); if (!path || !path->valid) { int new_path = 0; if (!path) { - path = path_rec_create(dev, phdr->hwaddr + 4); + path = path_rec_create(dev, cb->hwaddr + 4); new_path = 1; } if (path) { - /* put pseudoheader back on for next time */ - skb_push(skb, sizeof *phdr); __skb_queue_tail(&path->queue, skb); if (!path->query && path_rec_start(dev, path)) { @@ -693,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, be16_to_cpu(path->pathrec.dlid)); spin_unlock_irqrestore(&priv->lock, flags); - ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); + ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); return; } else if ((path->query || !path_rec_start(dev, path)) && skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { - /* put pseudoheader back on for next time */ - skb_push(skb, sizeof *phdr); __skb_queue_tail(&path->queue, skb); } else { ++dev->stats.tx_dropped; @@ -712,18 +715,23 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; + struct neighbour *n = NULL; unsigned long flags; - if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) { - if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) { + rcu_read_lock(); + if (likely(skb_dst(skb))) + n = dst_get_neighbour(skb_dst(skb)); + + if (likely(n)) { + if (unlikely(!*to_ipoib_neigh(n))) { ipoib_path_lookup(skb, dev); - return NETDEV_TX_OK; + goto unlock; } - neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour); + neigh = *to_ipoib_neigh(n); if (unlikely((memcmp(&neigh->dgid.raw, - skb_dst(skb)->neighbour->ha + 4, + n->ha + 4, sizeof(union ib_gid))) || (neigh->dev != dev))) { spin_lock_irqsave(&priv->lock, flags); @@ -740,17 +748,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ipoib_neigh_free(dev, neigh); spin_unlock_irqrestore(&priv->lock, flags); ipoib_path_lookup(skb, dev); - return NETDEV_TX_OK; + goto unlock; } if (ipoib_cm_get(neigh)) { if (ipoib_cm_up(neigh)) { ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); - return NETDEV_TX_OK; + goto unlock; } } else if (neigh->ah) { - ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); - return NETDEV_TX_OK; + ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha)); + goto unlock; } if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { @@ -762,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); } } else { - struct ipoib_pseudoheader *phdr = - (struct ipoib_pseudoheader *) skb->data; - skb_pull(skb, sizeof *phdr); + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; - if (phdr->hwaddr[4] == 0xff) { + if (cb->hwaddr[4] == 0xff) { /* Add in the P_Key for multicast*/ - phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; - phdr->hwaddr[9] = priv->pkey & 0xff; + cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; + 
cb->hwaddr[9] = priv->pkey & 0xff; - ipoib_mcast_send(dev, phdr->hwaddr + 4, skb); + ipoib_mcast_send(dev, cb->hwaddr + 4, skb); } else { /* unicast GID -- should be ARP or RARP reply */ @@ -780,17 +786,18 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n", skb_dst(skb) ? "neigh" : "dst", be16_to_cpup((__be16 *) skb->data), - IPOIB_QPN(phdr->hwaddr), - phdr->hwaddr + 4); + IPOIB_QPN(cb->hwaddr), + cb->hwaddr + 4); dev_kfree_skb_any(skb); ++dev->stats.tx_dropped; - return NETDEV_TX_OK; + goto unlock; } - unicast_arp_send(skb, dev, phdr); + unicast_arp_send(skb, dev, cb); } } - +unlock: + rcu_read_unlock(); return NETDEV_TX_OK; } @@ -819,14 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb, header->reserved = 0; /* - * If we don't have a neighbour structure, stuff the - * destination address onto the front of the skb so we can - * figure out where to send the packet later. + * If we don't have a dst_entry structure, stuff the + * destination address into skb->cb so we can figure out where + * to send the packet later. */ - if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) { - struct ipoib_pseudoheader *phdr = - (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); - memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); + if (!skb_dst(skb)) { + struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; + memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); } return 0; @@ -1002,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev) dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - /* - * We add in INFINIBAND_ALEN to allow for the destination - * address "pseudoheader" for skbs without neighbour struct. - */ - dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN; + dev->hard_header_len = IPOIB_ENCAP_LEN; dev->addr_len = INFINIBAND_ALEN; dev->type = ARPHRD_INFINIBAND; dev->tx_queue_len = ipoib_sendq_size * 2; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3871ac66355..fc045946298 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -189,7 +189,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, mcast->mcmember = *mcmember; - /* Set the cached Q_Key before we attach if it's the broadcast group */ + /* Set the multicast MTU and cached Q_Key before we attach if it's + * the broadcast group. 
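
Note: the ipoib hunks above drop the on-the-wire pseudoheader in favour of stashing the destination hardware address in skb->cb, and the new struct ipoib_cb deliberately puts a struct qdisc_skb_cb in front of the address, since the queueing layer also writes to the start of skb->cb between hard_header() and start_xmit(). Below is a self-contained sketch of that "reserved prefix" layout; the sizes and the qdisc stand-in are illustrative, not the kernel's real definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CB_SIZE    48   /* roughly sizeof(skb->cb) in this kernel generation (illustrative) */
#define HWADDR_LEN 20   /* INFINIBAND_ALEN */

/* Stand-in for struct qdisc_skb_cb -- not the real layout, just the prefix
 * region the queueing layer is allowed to scribble over. */
struct fake_qdisc_cb {
	unsigned int pkt_len;
	char         data[20];
};

/* Shape of the new struct ipoib_cb: driver data lives *after* the prefix. */
struct fake_ipoib_cb {
	struct fake_qdisc_cb qdisc_cb;
	uint8_t              hwaddr[HWADDR_LEN];
};

int main(void)
{
	/* in the kernel this struct overlays skb->cb; here we only check it
	 * would fit and then use it directly                               */
	struct fake_ipoib_cb icb;
	uint8_t daddr[HWADDR_LEN] = { 0 };

	daddr[4] = 0xff;                        /* first GID byte 0xff => multicast */
	assert(sizeof(icb) <= CB_SIZE);         /* overlay must fit in cb[]         */

	memcpy(icb.hwaddr, daddr, HWADDR_LEN);  /* hard_header(): stash the daddr   */

	if (icb.hwaddr[4] == 0xff)              /* start_xmit(): read it back       */
		printf("multicast destination\n");
	return 0;
}
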
+ */ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, sizeof (union ib_gid))) { spin_lock_irq(&priv->lock); @@ -197,10 +199,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, spin_unlock_irq(&priv->lock); return -EAGAIN; } + priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey); spin_unlock_irq(&priv->lock); priv->tx_wr.wr.ud.remote_qkey = priv->qkey; set_qkey = 1; + + if (!ipoib_cm_admin_enabled(dev)) { + rtnl_lock(); + dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); + rtnl_unlock(); + } } if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { @@ -258,17 +267,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, netif_tx_lock_bh(dev); while (!skb_queue_empty(&mcast->pkt_queue)) { struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); + netif_tx_unlock_bh(dev); skb->dev = dev; - if (!skb_dst(skb) || !skb_dst(skb)->neighbour) { - /* put pseudoheader back on for next time */ - skb_push(skb, sizeof (struct ipoib_pseudoheader)); - } - if (dev_queue_xmit(skb)) ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); + netif_tx_lock_bh(dev); } netif_tx_unlock_bh(dev); @@ -589,14 +595,6 @@ void ipoib_mcast_join_task(struct work_struct *work) return; } - priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); - - if (!ipoib_cm_admin_enabled(dev)) { - rtnl_lock(); - dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu)); - rtnl_unlock(); - } - ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n"); clear_bit(IPOIB_MCAST_RUN, &priv->flags); @@ -715,11 +713,15 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) out: if (mcast && mcast->ah) { - if (skb_dst(skb) && - skb_dst(skb)->neighbour && - !*to_ipoib_neigh(skb_dst(skb)->neighbour)) { - struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, - skb->dev); + struct dst_entry *dst = skb_dst(skb); + struct neighbour *n = NULL; + + rcu_read_lock(); + if (dst) + n = dst_get_neighbour(dst); + if (n && !*to_ipoib_neigh(n)) { + struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, + skb->dev); if (neigh) { kref_get(&mcast->ah->ref); @@ -727,7 +729,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) list_add_tail(&neigh->list, &mcast->neigh_list); } } - + rcu_read_unlock(); spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); return; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 8db008de539..f8f57583f5d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -354,6 +354,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, } ib_conn = ep->dd_data; + if (iser_alloc_rx_descriptors(ib_conn)) + return -ENOMEM; + /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. 
exchanges * connection pointers */ @@ -388,19 +391,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) iser_conn->ib_conn = NULL; } -static int -iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - int err; - - err = iser_conn_set_full_featured_mode(conn); - if (err) - return err; - - return iscsi_conn_start(cls_conn); -} - static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); @@ -686,7 +676,7 @@ static struct iscsi_transport iscsi_iser_transport = { .get_conn_param = iscsi_conn_get_param, .get_ep_param = iscsi_iser_get_ep_param, .get_session_param = iscsi_session_get_param, - .start_conn = iscsi_iser_conn_start, + .start_conn = iscsi_conn_start, .stop_conn = iscsi_iser_conn_stop, /* iscsi host params */ .get_host_param = iscsi_host_get_param, diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 2f02ab0ccc1..634aef039fe 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -365,4 +365,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); int iser_initialize_task_headers(struct iscsi_task *task, struct iser_tx_desc *tx_desc); +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn); #endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 95a08a8ca8a..eb1ee6f8d89 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, } -static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) { int i, j; u64 dma_addr; @@ -236,23 +236,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) kfree(ib_conn->rx_descs); } -/** - * iser_conn_set_full_featured_mode - (iSER API) - */ -int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) +static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); - - /* Check that there is no posted recv or send buffers left - */ - /* they must be consumed during the login phase */ - BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0); - BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); + /* check if this is the last login - going to full feature phase */ + if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) + return 0; - if (iser_alloc_rx_descriptors(iser_conn->ib_conn)) - return -ENOMEM; + /* + * Check that there is one posted recv buffer (for the last login + * response) and no posted send buffers left - they must have been + * consumed during previous login phases. 
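
Note: the new iser_post_rx_bufs() above tests (req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE rather than a simple non-zero check because, as far as I can tell, that constant is a multi-bit mask (the login Transit flag plus the next-stage bits), so every bit must be present before the connection really moves to full-feature phase. A minimal sketch with stand-in flag values:

#include <stdio.h>

#define FLAG_TRANSIT 0x80                        /* illustrative values, not the real defines */
#define FLAG_NSG_FFP 0x03
#define FULL_FEATURE (FLAG_TRANSIT | FLAG_NSG_FFP)

static int is_final_login(unsigned char flags)
{
	/* all bits of the mask must be present, not just any of them */
	return (flags & FULL_FEATURE) == FULL_FEATURE;
}

int main(void)
{
	printf("%d\n", is_final_login(FLAG_TRANSIT));                /* 0: transit only  */
	printf("%d\n", is_final_login(FLAG_TRANSIT | FLAG_NSG_FFP)); /* 1: final login   */
	return 0;
}
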
+ */ + WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); + WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); + iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); /* Initial post receive buffers */ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) return -ENOMEM; @@ -421,6 +422,9 @@ int iser_send_control(struct iscsi_conn *conn, err = iser_post_recvl(iser_conn->ib_conn); if (err) goto send_control_error; + err = iser_post_rx_bufs(conn, task->hdr); + if (err) + goto send_control_error; } err = iser_post_send(iser_conn->ib_conn, mdesc); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 7d5109bbd1a..aa5eafa194a 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scmnd->sc_data_direction); } -static void srp_remove_req(struct srp_target_port *target, - struct srp_request *req, s32 req_lim_delta) +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @target: SRP target port. + * @req: SRP request. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + * ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. + */ +static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, + struct srp_request *req, + struct scsi_cmnd *scmnd) { unsigned long flags; - srp_unmap_data(req->scmnd, target, req); + spin_lock_irqsave(&target->lock, flags); + if (!scmnd) { + scmnd = req->scmnd; + req->scmnd = NULL; + } else if (req->scmnd == scmnd) { + req->scmnd = NULL; + } else { + scmnd = NULL; + } + spin_unlock_irqrestore(&target->lock, flags); + + return scmnd; +} + +/** + * srp_free_req() - Unmap data and add request to the free request list. 
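
Note: the srp_claim_req()/srp_free_req() split introduced above is an ownership hand-off: exactly one path (normal response processing, reset, or abort) may take req->scmnd under target->lock, and only the winner completes and frees the command. A stripped-down sketch of the claim-once idiom follows; the locking is elided and the names are invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct request_slot {
	void *scmnd;   /* plays the role of req->scmnd; NULL once claimed */
};

/* Return the command exactly once; every later caller gets NULL.  If
 * 'expected' is non-NULL, only claim when the slot still matches it.
 * In the driver this runs under target->lock. */
static void *claim(struct request_slot *req, void *expected)
{
	void *scmnd = NULL;

	if (req->scmnd && (!expected || req->scmnd == expected)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	}
	return scmnd;
}

int main(void)
{
	int cmd;
	struct request_slot req = { &cmd };

	printf("completion path claims: %p\n", claim(&req, NULL));  /* wins          */
	printf("abort path claims:      %p\n", claim(&req, &cmd));  /* gets NULL     */
	return 0;
}
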
+ */ +static void srp_free_req(struct srp_target_port *target, + struct srp_request *req, struct scsi_cmnd *scmnd, + s32 req_lim_delta) +{ + unsigned long flags; + + srp_unmap_data(scmnd, target, req); + spin_lock_irqsave(&target->lock, flags); target->req_lim += req_lim_delta; - req->scmnd = NULL; list_add_tail(&req->list, &target->free_reqs); spin_unlock_irqrestore(&target->lock, flags); } static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req, 0); + struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); + + if (scmnd) { + srp_free_req(target, req, scmnd, 0); + scmnd->result = DID_RESET << 16; + scmnd->scsi_done(scmnd); + } } static int srp_reconnect_target(struct srp_target_port *target) @@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) complete(&target->tsk_mgmt_done); } else { req = &target->req_ring[rsp->tag]; - scmnd = req->scmnd; - if (!scmnd) + scmnd = srp_claim_req(target, req, NULL); + if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); + + spin_lock_irqsave(&target->lock, flags); + target->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&target->lock, flags); + + return; + } scmnd->result = rsp->status; if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { @@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); - srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); + srp_free_req(target, req, scmnd, + be32_to_cpu(rsp->req_lim_delta)); + scmnd->host_scribble = NULL; scmnd->scsi_done(scmnd); } @@ -1613,25 +1660,18 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; - int ret = SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || target->qp_in_error) - return FAILED; - if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, - SRP_TSK_ABORT_TASK)) + if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) return FAILED; + srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK); + srp_free_req(target, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; + scmnd->scsi_done(scmnd); - if (req->scmnd) { - if (!target->tsk_mgmt_status) { - srp_remove_req(target, req, 0); - scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; - } - - return ret; + return SUCCESS; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 4dfa1eed4b7..f8f892b076e 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev) struct walkera_dev *w = input_get_drvdata(dev); parport_disable_irq(w->parport); + hrtimer_cancel(&w->timer); } static int walkera0701_connect(struct walkera_dev *w, int parport) @@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (parport_claim(w->pardevice)) goto init_err1; + hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + w->timer.function = timer_handler; + w->input_dev 
= input_allocate_device(); if (!w->input_dev) goto init_err2; @@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) if (err) goto init_err3; - hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - w->timer.function = timer_handler; return 0; init_err3: @@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport) static void walkera0701_disconnect(struct walkera_dev *w) { - hrtimer_cancel(&w->timer); input_unregister_device(w->input_dev); parport_release(w->pardevice); parport_unregister_device(w->pardevice); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 56abf3d0e91..92c7be14bd4 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -142,6 +142,7 @@ static const struct xpad_device { { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX }, { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX }, + { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX }, { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX }, diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 99d58764ef0..0b9944346ec 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -426,7 +426,9 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int /* * First try "E6 report". - * ALPS should return 0,0,10 or 0,0,100 + * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed. + * The bits 0-2 of the first byte will be 1s if some buttons are + * pressed. 
*/ param[0] = 0; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) || @@ -441,7 +443,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int dbg("E6 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]); - if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100)) + if ((param[0] & 0xf8) != 0 || param[1] != 0 || + (param[2] != 10 && param[2] != 100)) return NULL; /* diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983c004..13e38ffec35 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -373,6 +373,9 @@ static void setup_events_to_report(struct input_dev *input_dev, __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); + if (cfg->caps & HAS_INTEGRATED_BUTTON) + __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); + input_set_events_per_packet(input_dev, 60); } diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index bb9f5d31f0d..20153fe8151 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, }, + { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), @@ -319,6 +333,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), @@ -880,6 +900,7 @@ static int __init i8042_platform_init(void) int retval; #ifdef CONFIG_X86 + u8 a20_on = 0xdf; /* Just return if pre-detection shows no i8042 controller exist */ if (!x86_platform.i8042_detect()) return -ENODEV; @@ -919,6 +940,14 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + + /* + * A20 was already enabled during early kernel init. But some buggy + * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to + * resume from S3. So we do it here and hope that nothing breaks. 
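
Note on the constants in the A20 hunk just above: 0xd1 is the legacy keyboard-controller "write output port" command, and the 0xdf parameter is an output-port value with the A20 gate bit set and the reset bit left high (so the CPU is not reset); 0x00ff then issues command 0xff with no data as the "null command" the comment mentions. The nibble layout of the command word is an assumption to verify against i8042.h; the snippet below merely decodes it under that assumption.

#include <stdio.h>

int main(void)
{
	unsigned int cmd = 0x10d1;   /* the constant passed to i8042_command() above */

	/* assumed encoding (verify against i8042.h): bits 12-15 = parameter
	 * bytes to send, bits 8-11 = bytes to read back, bits 0-7 = command */
	printf("send %u byte(s), read %u, controller command 0x%02x\n",
	       (cmd >> 12) & 0xf, (cmd >> 8) & 0xf, cmd & 0xff);
	return 0;
}
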
+ */ + i8042_command(&a20_on, 0x10d1); + i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */ #endif /* CONFIG_X86 */ return retval; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 08ba5ad9c9b..a28ebf08560 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -242,7 +242,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); if (wacom->tool[0] != BTN_TOOL_MOUSE) { - input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8)); + input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8)); input_report_key(input, BTN_TOUCH, data[1] & 0x01); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x04); diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 3913f47ef86..492aa520b9d 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -616,7 +616,13 @@ static void int_in_work(struct work_struct *work) if (rc == 0) /* success, resubmit interrupt read URB */ rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc != 0 && rc != -ENODEV) { + + switch (rc) { + case 0: /* success */ + case -ENODEV: /* device gone */ + case -EINVAL: /* URB already resubmitted, or terminal badness */ + break; + default: /* failure: try to recover by resetting the device */ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); if (rc == 0) { @@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface) } /* gigaset_suspend - * This function is called before the USB connection is suspended. + * This function is called before the USB connection is suspended + * or before the USB device is reset. + * In the latter case, message == PMSG_ON. 
*/ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) { @@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) del_timer_sync(&ucs->timer_atrdy); del_timer_sync(&ucs->timer_cmd_in); del_timer_sync(&ucs->timer_int_in); - cancel_work_sync(&ucs->int_in_wq); + + /* don't try to cancel int_in_wq from within reset as it + * might be the one requesting the reset + */ + if (message.event != PM_EVENT_ON) + cancel_work_sync(&ucs->int_in_wq); gig_dbg(DEBUG_SUSPEND, "suspend complete"); return 0; diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 658e75f18d0..483ac00ae35 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -14,6 +14,7 @@ #include "gigaset.h" #include #include +#include #include #include #include @@ -222,10 +223,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl) static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) { #ifdef CONFIG_GIGASET_DEBUG + /* dump at most 20 messages in 20 secs */ + static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20); _cdebbuf *cdb; if (!(gigaset_debuglevel & level)) return; + if (!___ratelimit(&msg_dump_ratelimit, tag)) + return; cdb = capi_cmsg2str(p); if (cdb) { @@ -258,6 +263,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag, CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, CAPIMSG_CONTROL(data)); l -= 12; + if (l <= 0) + return; dbgline = kmalloc(3*l, GFP_ATOMIC); if (!dbgline) return; @@ -2057,12 +2064,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif, CapiResetProcedureNotSupportedByCurrentProtocol); } -/* - * dump unsupported/ignored messages at most twice per minute, - * some apps send those very frequently - */ -static unsigned long ignored_msg_dump_time; - /* * unsupported CAPI message handler */ @@ -2072,8 +2073,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif, { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); - if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) - dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); + dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); } @@ -2084,11 +2084,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { - if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) { - /* decode message */ - capi_message2cmsg(&iif->acmsg, skb->data); - dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); - } + /* decode message */ + capi_message2cmsg(&iif->acmsg, skb->data); + dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); dev_kfree_skb_any(skb); } diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index d497db0a26d..509135f225d 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, '$'); - *p = 0; - } else - strcpy(rev, " ??? 
"); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 574b09afedd..2eba9a12a6a 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1897,7 +1897,9 @@ int bitmap_load(mddev_t *mddev) * re-add of a missing device */ start = mddev->recovery_cp; + mutex_lock(&mddev->bitmap_info.mutex); err = bitmap_init_from_disk(bitmap, start); + mutex_unlock(&mddev->bitmap_info.mutex); } if (err) goto out; @@ -1982,6 +1984,8 @@ location_store(mddev_t *mddev, const char *buf, size_t len) if (mddev->pers) { mddev->pers->quiesce(mddev, 1); rv = bitmap_create(mddev); + if (!rv) + rv = bitmap_load(mddev); if (rv) { bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index c8827ffd85b..6f906bc9328 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -177,7 +177,6 @@ struct crypt_config { #define MIN_IOS 16 #define MIN_POOL_PAGES 32 -#define MIN_BIO_PAGES 8 static struct kmem_cache *_crypt_io_pool; @@ -849,12 +848,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, } /* - * if additional pages cannot be allocated without waiting, - * return a partially allocated bio, the caller will then try - * to allocate additional bios while submitting this partial bio + * If additional pages cannot be allocated without waiting, + * return a partially-allocated bio. The caller will then try + * to allocate more bios while submitting this partial bio. */ - if (i == (MIN_BIO_PAGES - 1)) - gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; len = (size > PAGE_SIZE) ? PAGE_SIZE : size; @@ -1047,16 +1045,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io) queue_work(cc->io_queue, &io->work); } -static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, - int error, int async) +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) { struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->target->private; - if (unlikely(error < 0)) { + if (unlikely(io->error < 0)) { crypt_free_buffer_pages(cc, clone); bio_put(clone); - io->error = -EIO; crypt_dec_pending(io); return; } @@ -1107,12 +1103,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) sector += bio_sectors(clone); crypt_inc_pending(io); + r = crypt_convert(cc, &io->ctx); + if (r < 0) + io->error = -EIO; + crypt_finished = atomic_dec_and_test(&io->ctx.pending); /* Encryption was already finished, submit io now */ if (crypt_finished) { - kcryptd_crypt_write_io_submit(io, r, 0); + kcryptd_crypt_write_io_submit(io, 0); /* * If there was an error, do not try next fragments. 
@@ -1163,11 +1163,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_dec_pending(io); } -static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) +static void kcryptd_crypt_read_done(struct dm_crypt_io *io) { - if (unlikely(error < 0)) - io->error = -EIO; - crypt_dec_pending(io); } @@ -1182,9 +1179,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) io->sector); r = crypt_convert(cc, &io->ctx); + if (r < 0) + io->error = -EIO; if (atomic_dec_and_test(&io->ctx.pending)) - kcryptd_crypt_read_done(io, r); + kcryptd_crypt_read_done(io); crypt_dec_pending(io); } @@ -1205,15 +1204,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); + if (error < 0) + io->error = -EIO; + mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); if (!atomic_dec_and_test(&ctx->pending)) return; if (bio_data_dir(io->base_bio) == READ) - kcryptd_crypt_read_done(io, error); + kcryptd_crypt_read_done(io); else - kcryptd_crypt_write_io_submit(io, error, 1); + kcryptd_crypt_write_io_submit(io, 1); } static void kcryptd_crypt(struct work_struct *work) diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 0bdb201c2c2..7344534294a 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -282,7 +282,7 @@ int dm_exception_store_init(void) return 0; persistent_fail: - dm_persistent_snapshot_exit(); + dm_transient_snapshot_exit(); transient_fail: return r; } diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index ea790623c30..3e90b8014f9 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -149,8 +149,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type, static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct flakey_c *fc = ti->private; + struct dm_dev *dev = fc->dev; + int r = 0; - return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg); + /* + * Only pass ioctls through if the device sizes match exactly. + */ + if (fc->start || + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) + r = scsi_verify_blk_ioctl(NULL, cmd); + + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); } static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm, diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index dc4433b13db..e0e775f41b7 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -296,6 +296,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, unsigned offset; unsigned num_bvecs; sector_t remaining = where->count; + struct request_queue *q = bdev_get_queue(where->bdev); + sector_t discard_sectors; /* * where->count may be zero if rw holds a flush and we need to @@ -305,9 +307,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, /* * Allocate a suitably sized-bio. 
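
Note: the dm-crypt hunks a little above stop passing an error code from helper to helper; instead crypt_convert() and kcryptd_async_done() latch the failure in io->error, and whichever path drops the last ctx->pending reference completes the I/O with that status. The same pattern, reduced to standalone C with invented names (-5 standing in for -EIO):

#include <stdatomic.h>
#include <stdio.h>

struct io {
	atomic_int pending;   /* one reference per in-flight fragment        */
	int error;            /* latched by whichever fragment fails first   */
};

static void io_complete(struct io *io)
{
	printf("I/O done, status %d\n", io->error);
}

/* Each fragment, sync or async, ends with this instead of forwarding
 * its own error code. */
static void io_put(struct io *io, int err)
{
	if (err)
		io->error = err;                      /* record, don't complete yet  */
	if (atomic_fetch_sub(&io->pending, 1) == 1)
		io_complete(io);                      /* last reference finishes it  */
}

int main(void)
{
	struct io io = { .pending = 3, .error = 0 };

	io_put(&io, 0);    /* fragment 1 ok                          */
	io_put(&io, -5);   /* fragment 2 fails                       */
	io_put(&io, 0);    /* fragment 3 ok -> completes with -5     */
	return 0;
}
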
*/ - num_bvecs = dm_sector_div_up(remaining, - (PAGE_SIZE >> SECTOR_SHIFT)); - num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs); + if (rw & REQ_DISCARD) + num_bvecs = 1; + else + num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), + dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); + bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); if (!bio) { printk(KERN_WARNING "%s : %s() failed\n", __FILE__, __func__); @@ -319,10 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, bio->bi_destructor = dm_bio_destructor; store_io_and_region_in_bio(bio, io, region); - /* - * Try and add as many pages as possible. - */ - while (remaining) { + if (rw & REQ_DISCARD) { + discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); + bio->bi_size = discard_sectors << SECTOR_SHIFT; + remaining -= discard_sectors; + } else while (remaining) { + /* + * Try and add as many pages as possible. + */ dp->get_page(dp, &page, &len, &offset); len = min(len, to_bytes(remaining)); if (!bio_add_page(bio, page, len, offset)) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4cacdad2270..bd3b294eead 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1524,6 +1524,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) if (copy_from_user(dmi, user, tmp.data_size)) goto bad; + /* + * Abort if something changed the ioctl data while it was being copied. + */ + if (dmi->data_size != tmp.data_size) { + DMERR("rejecting ioctl: data size modified while processing parameters"); + goto bad; + } + /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, tmp.data_size)) goto bad; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 3921e3bb43c..9728839f844 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct linear_c *lc = (struct linear_c *) ti->private; - return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg); + struct dm_dev *dev = lc->dev; + int r = 0; + + /* + * Only pass ioctls through if the device sizes match exactly. + */ + if (lc->start || + ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) + r = scsi_verify_blk_ioctl(NULL, cmd); + + return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg); } static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 209991bebd3..70373bfa20b 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1584,6 +1584,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, spin_unlock_irqrestore(&m->lock, flags); + /* + * Only pass ioctls through if the device sizes match exactly. + */ + if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) + r = scsi_verify_blk_ioctl(NULL, cmd); + return r ? 
: __blkdev_driver_ioctl(bdev, mode, cmd, arg); } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index e5d8904fc8f..437ae1825f1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -468,6 +468,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) INIT_WORK(&rs->md.event_work, do_table_event); ti->split_io = rs->md.chunk_sectors; ti->private = rs; + ti->num_flush_requests = 1; mutex_lock(&rs->md.reconfig_mutex); ret = md_run(&rs->md); diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9bfd057be68..42ef54f9260 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1210,7 +1210,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - if (!(bio->bi_rw & REQ_FLUSH)) + if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) dm_rh_dec(ms->rh, map_context->ll); return error; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 7771ed21218..69732e03eb3 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) return; } + if (bio->bi_rw & REQ_DISCARD) + return; + /* We must inform the log that the sync count has changed. */ log->type->set_region_sync(log, region, 0); @@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) struct bio *bio; for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio->bi_rw & REQ_FLUSH) + if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 9ecff5f3023..f3e632a483c 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 49b49bbe97a..cfed9d2204a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -745,8 +745,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) if (!md_in_flight(md)) wake_up(&md->wait); + /* + * Run this off this callpath, as drivers could invoke end_io while + * inside their request_fn (and holding the queue lock). Calling + * back into ->request_fn() could deadlock attempting to grab the + * queue lock again. + */ if (run_queue) - blk_run_queue(md->queue); + blk_run_queue_async(md->queue); /* * dm_put() must be at the end of this function. See the comment above @@ -856,10 +862,14 @@ static void dm_done(struct request *clone, int error, bool mapped) { int r = error; struct dm_rq_target_io *tio = clone->end_io_data; - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; + dm_request_endio_fn rq_end_io = NULL; - if (mapped && rq_end_io) - r = rq_end_io(tio->ti, clone, error, &tio->info); + if (tio->ti) { + rq_end_io = tio->ti->type->rq_end_io; + + if (mapped && rq_end_io) + r = rq_end_io(tio->ti, clone, error, &tio->info); + } if (r <= 0) /* The target wants to complete the I/O */ @@ -1577,15 +1587,6 @@ static int map_request(struct dm_target *ti, struct request *clone, int r, requeued = 0; struct dm_rq_target_io *tio = clone->end_io_data; - /* - * Hold the md reference here for the in-flight I/O. 
- * We can't rely on the reference count by device opener, - * because the device may be closed during the request completion - * when all bios are completed. - * See the comment in rq_completed() too. - */ - dm_get(md); - tio->ti = ti; r = ti->type->map_rq(ti, clone, &tio->info); switch (r) { @@ -1617,6 +1618,26 @@ static int map_request(struct dm_target *ti, struct request *clone, return requeued; } +static struct request *dm_start_request(struct mapped_device *md, struct request *orig) +{ + struct request *clone; + + blk_start_request(orig); + clone = orig->special; + atomic_inc(&md->pending[rq_data_dir(clone)]); + + /* + * Hold the md reference here for the in-flight I/O. + * We can't rely on the reference count by device opener, + * because the device may be closed during the request completion + * when all bios are completed. + * See the comment in rq_completed() too. + */ + dm_get(md); + + return clone; +} + /* * q->request_fn for request-based dm. * Called with the queue lock held. @@ -1646,14 +1667,21 @@ static void dm_request_fn(struct request_queue *q) pos = blk_rq_pos(rq); ti = dm_table_find_target(map, pos); - BUG_ON(!dm_target_is_valid(ti)); + if (!dm_target_is_valid(ti)) { + /* + * Must perform setup, that dm_done() requires, + * before calling dm_kill_unmapped_request + */ + DMERR_LIMIT("request attempted access beyond the end of device"); + clone = dm_start_request(md, rq); + dm_kill_unmapped_request(clone, -EIO); + continue; + } if (ti->type->busy && ti->type->busy(ti)) goto delay_and_out; - blk_start_request(rq); - clone = rq->special; - atomic_inc(&md->pending[rq_data_dir(clone)]); + clone = dm_start_request(md, rq); spin_unlock(q->queue_lock); if (map_request(ti, clone, md)) @@ -1673,8 +1701,6 @@ static void dm_request_fn(struct request_queue *q) blk_delay_queue(q, HZ / 10); out: dm_table_put(map); - - return; } int dm_underlying_device_busy(struct request_queue *q) diff --git a/drivers/md/md.c b/drivers/md/md.c index bc8342812d0..4ef75e9c7c7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -299,6 +299,10 @@ static int md_make_request(struct request_queue *q, struct bio *bio) bio_io_error(bio); return 0; } + if (mddev->ro == 1 && unlikely(rw == WRITE)) { + bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS); + return 0; + } smp_rmb(); /* Ensure implications of 'active' are visible */ rcu_read_lock(); if (mddev->suspended) { @@ -348,6 +352,8 @@ void mddev_suspend(mddev_t *mddev) synchronize_rcu(); wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev->pers->quiesce(mddev, 1); + + del_timer_sync(&mddev->safemode_timer); } EXPORT_SYMBOL_GPL(mddev_suspend); @@ -407,7 +413,7 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev); + bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; bi->bi_bdev = rdev->bdev; @@ -1094,8 +1100,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ret = 0; } rdev->sectors = rdev->sb_start; - /* Limit to 4TB as metadata cannot record more than that */ - if (rdev->sectors >= (2ULL << 32)) + /* Limit to 4TB as metadata cannot record more than that. 
+ * (not needed for Linear and RAID0 as metadata doesn't + * record this size) + */ + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1377,7 +1386,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. */ - if (num_sectors >= (2ULL << 32)) + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e86bf3682e1..0e5084b7819 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -283,7 +283,7 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf) kfree(conf->strip_zone); kfree(conf->devlist); kfree(conf); - *private_conf = NULL; + *private_conf = ERR_PTR(err); return err; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 3a9e59fe7ad..36f1ed313ae 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -614,9 +614,22 @@ static void wait_barrier(conf_t *conf) spin_lock_irq(&conf->resync_lock); if (conf->barrier) { conf->nr_waiting++; - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + /* Wait for the barrier to drop. + * However if there are already pending + * requests (preventing the barrier from + * rising completely), and the + * pre-process bio queue isn't empty, + * then don't wait, as we need to empty + * that queue to get the nr_pending + * count down. + */ + wait_event_lock_irq(conf->wait_barrier, + !conf->barrier || + (conf->nr_pending && + current->bio_list && + !bio_list_empty(current->bio_list)), conf->resync_lock, - ); + ); conf->nr_waiting--; } conf->nr_pending++; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 17cb6ab6230..b65a7c50eb6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -667,9 +667,22 @@ static void wait_barrier(conf_t *conf) spin_lock_irq(&conf->resync_lock); if (conf->barrier) { conf->nr_waiting++; - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, + /* Wait for the barrier to drop. + * However if there are already pending + * requests (preventing the barrier from + * rising completely), and the + * pre-process bio queue isn't empty, + * then don't wait, as we need to empty + * that queue to get the nr_pending + * count down. + */ + wait_event_lock_irq(conf->wait_barrier, + !conf->barrier || + (conf->nr_pending && + current->bio_list && + !bio_list_empty(current->bio_list)), conf->resync_lock, - ); + ); conf->nr_waiting--; } conf->nr_pending++; @@ -1845,6 +1858,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, /* want to reconstruct this device */ rb2 = r10_bio; sect = raid10_find_virt(conf, sector_nr, i); + if (sect >= mddev->resync_max_sectors) { + /* last stripe is not complete - don't + * try to recover this sector. 
+ */ + continue; + } /* Unless we are doing a full sync, we only need * to recover the block if it is set in the bitmap */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1f6c68df6f3..cff955a0408 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -199,12 +199,14 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state)) + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) list_add_tail(&sh->lru, &conf->delayed_list); else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); else { + clear_bit(STRIPE_DELAYED, &sh->state); clear_bit(STRIPE_BIT_DELAY, &sh->state); list_add_tail(&sh->lru, &conf->handle_list); } @@ -3846,7 +3848,6 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) raid_bio->bi_next = (void*)rdev; align_bi->bi_bdev = rdev->bdev; align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); - align_bi->bi_sector += rdev->data_offset; if (!bio_fits_rdev(align_bi)) { /* too big in some way */ @@ -3855,6 +3856,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio) return 0; } + /* No reshape active, so we can trust rdev->data_offset */ + align_bi->bi_sector += rdev->data_offset; + spin_lock_irq(&conf->device_lock); wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0, diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index f7328777595..d5cda35dcc7 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, if (minor == MAX_DVB_MINORS) { kfree(dvbdevfops); kfree(dvbdev); + up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); return -EINVAL; } diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c index 43971e63baa..aa63d687d27 100644 --- a/drivers/media/dvb/frontends/lgdt330x.c +++ b/drivers/media/dvb/frontends/lgdt330x.c @@ -104,8 +104,8 @@ static int i2c_write_demod_bytes (struct lgdt330x_state* state, * then reads the data returned for (len) bytes. 
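
Note: the lgdt330x signature change immediately below is worth a second look. While i2c_read_demod_bytes() returned u8, a negative errno could never reach the caller because it was truncated to a small positive value, which is also why the hunk adds the "if (ret >= 0) ret = -EIO;" normalization. A tiny standalone illustration of the truncation, with invented function names:

#include <stdio.h>

#define EIO 5

/* Old shape: a u8 cannot carry a negative errno. */
static unsigned char read_u8(int fail)
{
	return fail ? -EIO : 0;   /* -5 is truncated to 251 */
}

/* Fixed shape: int return, negative errno on failure, 0 on success. */
static int read_int(int fail)
{
	return fail ? -EIO : 0;
}

int main(void)
{
	if (read_u8(1) < 0)
		printf("u8 version: error detected\n");             /* never prints   */
	else
		printf("u8 version: error lost, got %d\n", read_u8(1));

	if (read_int(1) < 0)
		printf("int version: error detected\n");            /* prints         */
	return 0;
}
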
*/ -static u8 i2c_read_demod_bytes (struct lgdt330x_state* state, - enum I2C_REG reg, u8* buf, int len) +static int i2c_read_demod_bytes(struct lgdt330x_state *state, + enum I2C_REG reg, u8 *buf, int len) { u8 wr [] = { reg }; struct i2c_msg msg [] = { @@ -118,6 +118,8 @@ static u8 i2c_read_demod_bytes (struct lgdt330x_state* state, ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __func__, state->config->demod_address, reg, ret); + if (ret >= 0) + ret = -EIO; } else { ret = 0; } diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c index e5180e45d31..5d15c6b74d9 100644 --- a/drivers/media/dvb/mantis/mantis_dvb.c +++ b/drivers/media/dvb/mantis/mantis_dvb.c @@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis) err5: tasklet_kill(&mantis->tasklet); dvb_net_release(&mantis->dvbnet); - dvb_unregister_frontend(mantis->fe); - dvb_frontend_detach(mantis->fe); + if (mantis->fe) { + dvb_unregister_frontend(mantis->fe); + dvb_frontend_detach(mantis->fe); + } err4: mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem); diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c index 0c8164a2cc3..3d2867d349b 100644 --- a/drivers/media/dvb/siano/smsusb.c +++ b/drivers/media/dvb/siano/smsusb.c @@ -480,7 +480,7 @@ static int smsusb_resume(struct usb_interface *intf) return 0; } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = { { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0100), @@ -541,6 +541,10 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = { .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { USB_DEVICE(0x2040, 0xc090), .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, + { USB_DEVICE(0x2040, 0xc0a0), + .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, + { USB_DEVICE(0x2040, 0xf5a0), + .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, { } /* Terminating entry */ }; diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c index a43ed6c41bf..12b91ae1b20 100644 --- a/drivers/media/rc/ene_ir.c +++ b/drivers/media/rc/ene_ir.c @@ -1017,22 +1017,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) spin_lock_init(&dev->hw_lock); - /* claim the resources */ - error = -EBUSY; - dev->hw_io = pnp_port_start(pnp_dev, 0); - if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { - dev->hw_io = -1; - dev->irq = -1; - goto error; - } - - dev->irq = pnp_irq(pnp_dev, 0); - if (request_irq(dev->irq, ene_isr, - IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { - dev->irq = -1; - goto error; - } - pnp_set_drvdata(pnp_dev, dev); dev->pnp_dev = pnp_dev; @@ -1085,6 +1069,22 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id) device_set_wakeup_capable(&pnp_dev->dev, true); device_set_wakeup_enable(&pnp_dev->dev, true); + /* claim the resources */ + error = -EBUSY; + dev->hw_io = pnp_port_start(pnp_dev, 0); + if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) { + dev->hw_io = -1; + dev->irq = -1; + goto error; + } + + dev->irq = pnp_irq(pnp_dev, 0); + if (request_irq(dev->irq, ene_isr, + IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) { + dev->irq = -1; + goto error; + } + error = rc_register_device(rdev); if (error < 0) goto error; diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c 
index 7f7079b12f2..4218f7369c5 100644 --- a/drivers/media/rc/fintek-cir.c +++ b/drivers/media/rc/fintek-cir.c @@ -504,16 +504,6 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id spin_lock_init(&fintek->fintek_lock); - ret = -EBUSY; - /* now claim resources */ - if (!request_region(fintek->cir_addr, - fintek->cir_port_len, FINTEK_DRIVER_NAME)) - goto failure; - - if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED, - FINTEK_DRIVER_NAME, (void *)fintek)) - goto failure; - pnp_set_drvdata(pdev, fintek); fintek->pdev = pdev; @@ -548,6 +538,16 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); + ret = -EBUSY; + /* now claim resources */ + if (!request_region(fintek->cir_addr, + fintek->cir_port_len, FINTEK_DRIVER_NAME)) + goto failure; + + if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED, + FINTEK_DRIVER_NAME, (void *)fintek)) + goto failure; + ret = rc_register_device(rdev); if (ret) goto failure; diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index ecd3d028076..d8e0b2d81c8 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -1477,6 +1477,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev = rc_allocate_device(); if (!rdev) goto failure; + itdev->rdev = rdev; ret = -ENODEV; @@ -1519,16 +1520,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id /* initialize raw event */ init_ir_raw_event(&itdev->rawir); - ret = -EBUSY; - /* now claim resources */ - if (!request_region(itdev->cir_addr, - dev_desc->io_region_size, ITE_DRIVER_NAME)) - goto failure; - - if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED, - ITE_DRIVER_NAME, (void *)itdev)) - goto failure; - /* set driver data into the pnp device */ pnp_set_drvdata(pdev, itdev); itdev->pdev = pdev; @@ -1604,11 +1595,20 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev->driver_name = ITE_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; + ret = -EBUSY; + /* now claim resources */ + if (!request_region(itdev->cir_addr, + dev_desc->io_region_size, ITE_DRIVER_NAME)) + goto failure; + + if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED, + ITE_DRIVER_NAME, (void *)itdev)) + goto failure; + ret = rc_register_device(rdev); if (ret) goto failure; - itdev->rdev = rdev; ite_pr(KERN_NOTICE, "driver has been successfully loaded\n"); return 0; diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 9fd019e6b9b..c212276202f 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c @@ -1027,24 +1027,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) spin_lock_init(&nvt->nvt_lock); spin_lock_init(&nvt->tx.lock); - ret = -EBUSY; - /* now claim resources */ - if (!request_region(nvt->cir_addr, - CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) - goto failure; - - if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, - NVT_DRIVER_NAME, (void *)nvt)) - goto failure; - - if (!request_region(nvt->cir_wake_addr, - CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) - goto failure; - - if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, - NVT_DRIVER_NAME, (void *)nvt)) - goto failure; - pnp_set_drvdata(pdev, nvt); nvt->pdev = pdev; @@ -1091,6 +1073,24 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) rdev->tx_resolution = XYZ; #endif + 
ret = -EBUSY; + /* now claim resources */ + if (!request_region(nvt->cir_addr, + CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) + goto failure; + + if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, + NVT_DRIVER_NAME, (void *)nvt)) + goto failure; + + if (!request_region(nvt->cir_wake_addr, + CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) + goto failure; + + if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, + NVT_DRIVER_NAME, (void *)nvt)) + goto failure; + ret = rc_register_device(rdev); if (ret) goto failure; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 3186ac7c2c1..62910ac90f6 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -772,9 +772,12 @@ static ssize_t show_protocols(struct device *device, if (dev->driver_type == RC_DRIVER_SCANCODE) { enabled = dev->rc_map.rc_type; allowed = dev->allowed_protos; - } else { + } else if (dev->raw) { enabled = dev->raw->enabled_protocols; allowed = ir_raw_get_allowed_protocols(); + } else { + mutex_unlock(&dev->lock); + return -ENODEV; } IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c index 5d06b899e85..9e55a0c9ac5 100644 --- a/drivers/media/rc/winbond-cir.c +++ b/drivers/media/rc/winbond-cir.c @@ -1003,39 +1003,10 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n", data->wbase, data->ebase, data->sbase, data->irq); - if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) { - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", - data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1); - err = -EBUSY; - goto exit_free_data; - } - - if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) { - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", - data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1); - err = -EBUSY; - goto exit_release_wbase; - } - - if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) { - dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", - data->sbase, data->sbase + SP_IOMEM_LEN - 1); - err = -EBUSY; - goto exit_release_ebase; - } - - err = request_irq(data->irq, wbcir_irq_handler, - IRQF_DISABLED, DRVNAME, device); - if (err) { - dev_err(dev, "Failed to claim IRQ %u\n", data->irq); - err = -EBUSY; - goto exit_release_sbase; - } - led_trigger_register_simple("cir-tx", &data->txtrigger); if (!data->txtrigger) { err = -ENOMEM; - goto exit_free_irq; + goto exit_free_data; } led_trigger_register_simple("cir-rx", &data->rxtrigger); @@ -1058,6 +1029,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) goto exit_unregister_led; } + data->dev->driver_type = RC_DRIVER_IR_RAW; data->dev->driver_name = WBCIR_NAME; data->dev->input_name = WBCIR_NAME; data->dev->input_phys = "wbcir/cir0"; @@ -1073,9 +1045,38 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) data->dev->priv = data; data->dev->dev.parent = &device->dev; + if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) { + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", + data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1); + err = -EBUSY; + goto exit_free_rc; + } + + if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) { + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", + data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1); + err = -EBUSY; + goto exit_release_wbase; + } + + if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) { + dev_err(dev, "Region 0x%lx-0x%lx already in use!\n", + data->sbase, data->sbase + 
SP_IOMEM_LEN - 1); + err = -EBUSY; + goto exit_release_ebase; + } + + err = request_irq(data->irq, wbcir_irq_handler, + IRQF_DISABLED, DRVNAME, device); + if (err) { + dev_err(dev, "Failed to claim IRQ %u\n", data->irq); + err = -EBUSY; + goto exit_release_sbase; + } + err = rc_register_device(data->dev); if (err) - goto exit_free_rc; + goto exit_free_irq; device_init_wakeup(&device->dev, 1); @@ -1083,14 +1084,6 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) return 0; -exit_free_rc: - rc_free_device(data->dev); -exit_unregister_led: - led_classdev_unregister(&data->led); -exit_unregister_rxtrigger: - led_trigger_unregister_simple(data->rxtrigger); -exit_unregister_txtrigger: - led_trigger_unregister_simple(data->txtrigger); exit_free_irq: free_irq(data->irq, device); exit_release_sbase: @@ -1099,6 +1092,14 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) release_region(data->ebase, EHFUNC_IOMEM_LEN); exit_release_wbase: release_region(data->wbase, WAKEUP_IOMEM_LEN); +exit_free_rc: + rc_free_device(data->dev); +exit_unregister_led: + led_classdev_unregister(&data->led); +exit_unregister_rxtrigger: + led_trigger_unregister_simple(data->rxtrigger); +exit_unregister_txtrigger: + led_trigger_unregister_simple(data->txtrigger); exit_free_data: kfree(data); pnp_set_drvdata(device, NULL); diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c index c03eb29a9ee..9945aaf1bba 100644 --- a/drivers/media/video/au0828/au0828-video.c +++ b/drivers/media/video/au0828/au0828-video.c @@ -1697,14 +1697,18 @@ static int vidioc_streamoff(struct file *file, void *priv, (AUVI_INPUT(i).audio_setup)(dev, 0); } - videobuf_streamoff(&fh->vb_vidq); - res_free(fh, AU0828_RESOURCE_VIDEO); + if (res_check(fh, AU0828_RESOURCE_VIDEO)) { + videobuf_streamoff(&fh->vb_vidq); + res_free(fh, AU0828_RESOURCE_VIDEO); + } } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { dev->vbi_timeout_running = 0; del_timer_sync(&dev->vbi_timeout); - videobuf_streamoff(&fh->vb_vbiq); - res_free(fh, AU0828_RESOURCE_VBI); + if (res_check(fh, AU0828_RESOURCE_VBI)) { + videobuf_streamoff(&fh->vb_vbiq); + res_free(fh, AU0828_RESOURCE_VBI); + } } return 0; diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c index 89fec4c500a..731cd1664c4 100644 --- a/drivers/media/video/gspca/spca506.c +++ b/drivers/media/video/gspca/spca506.c @@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 {USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index 514aea76eaa..4c0394a8afd 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -284,12 +284,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev) hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00); + dev->status = STATUS_STREAMING; + INIT_WORK(&dev->worker, hdpvr_transmit_buffers); queue_work(dev->workqueue, &dev->worker); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, "streaming started\n"); - dev->status = STATUS_STREAMING; return 0; } diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c index e799331389b..c4eca15baf6 100644 --- 
a/drivers/media/video/pvrusb2/pvrusb2-devattr.c +++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c @@ -319,7 +319,17 @@ static struct tda829x_config tda829x_no_probe = { .probe_tuner = TDA829X_DONT_PROBE, }; +static struct tda18271_std_map hauppauge_tda18271_dvbt_std_map = { + .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4, + .if_lvl = 1, .rfagc_top = 0x37, }, + .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5, + .if_lvl = 1, .rfagc_top = 0x37, }, + .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6, + .if_lvl = 1, .rfagc_top = 0x37, }, +}; + static struct tda18271_config hauppauge_tda18271_dvb_config = { + .std_map = &hauppauge_tda18271_dvbt_std_map, .gate = TDA18271_GATE_ANALOG, .output_opt = TDA18271_OUTPUT_LT_OFF, }; diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c index 543a80395b7..5afdbb7bbea 100644 --- a/drivers/media/video/uvc/uvc_v4l2.c +++ b/drivers/media/video/uvc/uvc_v4l2.c @@ -65,6 +65,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain, goto done; } + /* Prevent excessive memory consumption, as well as integer + * overflows. + */ + if (xmap->menu_count == 0 || + xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) { + ret = -EINVAL; + goto done; + } + size = xmap->menu_count * sizeof(*map->menu_info); map->menu_info = kmalloc(size, GFP_KERNEL); if (map->menu_info == NULL) { @@ -701,7 +710,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) break; } pin = iterm->id; - } else if (pin < selector->bNrInPins) { + } else if (index < selector->bNrInPins) { pin = selector->baSourceID[index]; list_for_each_entry(iterm, &chain->entities, chain) { if (!UVC_ENTITY_IS_ITERM(iterm)) diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index 2a38d5e5535..cf2401a041a 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -200,6 +200,7 @@ struct uvc_xu_control { /* Maximum allowed number of control mappings per device */ #define UVC_MAX_CONTROL_MAPPINGS 1024 +#define UVC_MAX_CONTROL_MENU_ENTRIES 32 /* Devices quirks */ #define UVC_QUIRK_STATUS_INTERVAL 0x00000001 diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c index 69e8c6ffcc4..bda252f04e8 100644 --- a/drivers/media/video/v4l2-ioctl.c +++ b/drivers/media/video/v4l2-ioctl.c @@ -2289,6 +2289,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size, struct v4l2_ext_controls *ctrls = parg; if (ctrls->count != 0) { + if (ctrls->count > V4L2_CID_MAX_CTRLS) { + ret = -EINVAL; + break; + } *user_ptr = (void __user *)ctrls->controls; *kernel_ptr = (void **)&ctrls->controls; *array_size = sizeof(struct v4l2_ext_control) diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index a20e1c41bed..ccd81b1540a 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -408,8 +408,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data) u32 fatevent; int err; - add_interrupt_randomness(irq); - err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1, event_regs, 3); if (err) @@ -938,9 +936,6 @@ static int __devinit ab3100_probe(struct i2c_client *client, err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler, IRQF_ONESHOT, "ab3100-core", ab3100); - /* This real unpredictable IRQ is of course sampled for entropy */ - rand_initialize_irq(client->irq); - if (err) goto exit_no_irq; diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index 3d7dce671b9..d69dc4bf8bb 100644 --- 
a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c @@ -1309,8 +1309,6 @@ static int __init ab3550_probe(struct i2c_client *client, err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler, IRQF_ONESHOT, "ab3550-core", ab); - /* This real unpredictable IRQ is of course sampled for entropy */ - rand_initialize_irq(client->irq); if (err) goto exit_no_irq; diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c index f1d88483112..2943fbf1423 100644 --- a/drivers/mfd/adp5520.c +++ b/drivers/mfd/adp5520.c @@ -36,6 +36,7 @@ struct adp5520_chip { struct blocking_notifier_head notifier_list; int irq; unsigned long id; + uint8_t mode; }; static int __adp5520_read(struct i2c_client *client, @@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode); + /* All other bits are W1C */ + chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY; + adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); return 0; } @@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode); return 0; } #endif diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c index 155fa040788..e488a78a2fd 100644 --- a/drivers/mfd/cs5535-mfd.c +++ b/drivers/mfd/cs5535-mfd.c @@ -179,7 +179,7 @@ static struct pci_device_id cs5535_mfd_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl); -static struct pci_driver cs5535_mfd_drv = { +static struct pci_driver cs5535_mfd_driver = { .name = DRV_NAME, .id_table = cs5535_mfd_pci_tbl, .probe = cs5535_mfd_probe, @@ -188,12 +188,12 @@ static struct pci_driver cs5535_mfd_drv = { static int __init cs5535_mfd_init(void) { - return pci_register_driver(&cs5535_mfd_drv); + return pci_register_driver(&cs5535_mfd_driver); } static void __exit cs5535_mfd_exit(void) { - pci_unregister_driver(&cs5535_mfd_drv); + pci_unregister_driver(&cs5535_mfd_driver); } module_init(cs5535_mfd_init); diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 43a76c41cfc..db662e2dcfa 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work) } local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq))); + } while (gpio_get_value(pdata->gpio)); } static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 0902523af62..b36aadb6e70 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -18,6 +18,10 @@ #include #include +static struct device_type mfd_dev_type = { + .name = "mfd_device", +}; + int mfd_cell_enable(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); @@ -87,6 +91,7 @@ static int mfd_add_device(struct device *parent, int id, goto fail_device; pdev->dev.parent = parent; + pdev->dev.type = &mfd_dev_type; if (cell->pdata_size) { ret = platform_device_add_data(pdev, @@ -122,7 +127,7 @@ static int mfd_add_device(struct device *parent, int id, } if (!cell->ignore_resource_conflicts) { - ret = acpi_check_resource_conflict(res); + ret = 
acpi_check_resource_conflict(&res[r]); if (ret) goto fail_res; } @@ -182,10 +187,16 @@ EXPORT_SYMBOL(mfd_add_devices); static int mfd_remove_devices_fn(struct device *dev, void *c) { - struct platform_device *pdev = to_platform_device(dev); - const struct mfd_cell *cell = mfd_get_cell(pdev); + struct platform_device *pdev; + const struct mfd_cell *cell; atomic_t **usage_count = c; + if (dev->type != &mfd_dev_type) + return 0; + + pdev = to_platform_device(dev); + cell = mfd_get_cell(pdev); + /* find the base address of usage_count pointers (for freeing) */ if (!*usage_count || (cell->usage_count < *usage_count)) *usage_count = cell->usage_count; diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index eb3b5f88e56..b0563b66d10 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -145,8 +145,17 @@ static int twl6030_irq_thread(void *data) } local_irq_enable(); } - ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes, - REG_INT_STS_A, 3); /* clear INT_STS_A */ + + /* + * NOTE: + * Simulation confirms that documentation is wrong w.r.t the + * interrupt status clear operation. A single *byte* write to + * any one of STS_A to STS_C register results in all three + * STS registers being reset. Since it does not matter which + * value is written, all three registers are cleared on a + * single byte write, so we just use 0x0 to clear. + */ + ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A); if (ret) pr_warning("twl6030: I2C error in clearing PIH ISR\n"); diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c index f742745ff35..b90f3e06b6c 100644 --- a/drivers/mfd/wm831x-otp.c +++ b/drivers/mfd/wm831x-otp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL); int wm831x_otp_init(struct wm831x *wm831x) { + char uuid[WM831X_UNIQUE_ID_LEN]; int ret; ret = device_create_file(wm831x->dev, &dev_attr_unique_id); @@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x) dev_err(wm831x->dev, "Unique ID attribute not created: %d\n", ret); + ret = wm831x_unique_id_read(wm831x, uuid); + if (ret == 0) + add_device_randomness(uuid, sizeof(uuid)); + else + dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret); + return ret; } diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index efec4139c3f..b1f16d6084a 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -244,6 +244,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev, if (err) return err; + spin_lock_init(&chip->irq_lock); chip->pdev = pdev; chip->iobase = pcim_iomap_table(pdev)[0]; diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c index bc685bfc4c3..87a390de054 100644 --- a/drivers/misc/cs5535-mfgpt.c +++ b/drivers/misc/cs5535-mfgpt.c @@ -262,7 +262,7 @@ static void __init reset_all_timers(void) * In other cases (such as with VSAless OpenFirmware), the system firmware * leaves timers available for us to use. 
*/ -static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt) +static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt) { struct cs5535_mfgpt_timer timer = { .chip = mfgpt }; unsigned long flags; diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 8cebec5e85e..cdefef231fb 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -133,12 +133,17 @@ static int force_hwbrks; static int hwbreaks_ok; static int hw_break_val; static int hw_break_val2; +static int cont_instead_of_sstep; +static unsigned long cont_thread_id; +static unsigned long sstep_thread_id; #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) static int arch_needs_sstep_emulation = 1; #else static int arch_needs_sstep_emulation; #endif +static unsigned long cont_addr; static unsigned long sstep_addr; +static int restart_from_top_after_write; static int sstep_state; /* Storage for the registers, in GDB format. */ @@ -186,7 +191,8 @@ static int kgdbts_unreg_thread(void *ptr) */ while (!final_ack) msleep_interruptible(1500); - + /* Pause for any other threads to exit after final ack. */ + msleep_interruptible(1000); if (configured) kgdb_unregister_io_module(&kgdbts_io_ops); configured = 0; @@ -210,7 +216,7 @@ static unsigned long lookup_addr(char *arg) if (!strcmp(arg, "kgdbts_break_test")) addr = (unsigned long)kgdbts_break_test; else if (!strcmp(arg, "sys_open")) - addr = (unsigned long)sys_open; + addr = (unsigned long)do_sys_open; else if (!strcmp(arg, "do_fork")) addr = (unsigned long)do_fork; else if (!strcmp(arg, "hw_break_val")) @@ -282,6 +288,16 @@ static void hw_break_val_write(void) hw_break_val++; } +static int get_thread_id_continue(char *put_str, char *arg) +{ + char *ptr = &put_str[11]; + + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &cont_thread_id); + return 0; +} + static int check_and_rewind_pc(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); @@ -298,13 +314,21 @@ static int check_and_rewind_pc(char *put_str, char *arg) if (addr + BREAK_INSTR_SIZE == ip) offset = -BREAK_INSTR_SIZE; #endif - if (strcmp(arg, "silent") && ip + offset != addr) { + + if (arch_needs_sstep_emulation && sstep_addr && + ip + offset == sstep_addr && + ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) { + /* This is special case for emulated single step */ + v2printk("Emul: rewind hit single step bp\n"); + restart_from_top_after_write = 1; + } else if (strcmp(arg, "silent") && ip + offset != addr) { eprintk("kgdbts: BP mismatch %lx expected %lx\n", ip + offset, addr); return 1; } /* Readjust the instruction pointer if needed */ ip += offset; + cont_addr = ip; #ifdef GDB_ADJUSTS_BREAK_OFFSET instruction_pointer_set(&kgdbts_regs, ip); #endif @@ -314,6 +338,8 @@ static int check_and_rewind_pc(char *put_str, char *arg) static int check_single_step(char *put_str, char *arg) { unsigned long addr = lookup_addr(arg); + static int matched_id; + /* * From an arch indepent point of view the instruction pointer * should be on a different instruction @@ -323,6 +349,29 @@ static int check_single_step(char *put_str, char *arg) gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); v2printk("Singlestep stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); + + if (sstep_thread_id != cont_thread_id) { + /* + * Ensure we stopped in the same thread id as before, else the + * debugger should continue until the original thread that was + * single stepped is scheduled again, emulating gdb's behavior. 
+ */ + v2printk("ThrID does not match: %lx\n", cont_thread_id); + if (arch_needs_sstep_emulation) { + if (matched_id && + instruction_pointer(&kgdbts_regs) != addr) + goto continue_test; + matched_id++; + ts.idx -= 2; + sstep_state = 0; + return 0; + } + cont_instead_of_sstep = 1; + ts.idx -= 4; + return 0; + } +continue_test: + matched_id = 0; if (instruction_pointer(&kgdbts_regs) == addr) { eprintk("kgdbts: SingleStep failed at %lx\n", instruction_pointer(&kgdbts_regs)); @@ -364,10 +413,40 @@ static int got_break(char *put_str, char *arg) return 1; } +static void get_cont_catch(char *arg) +{ + /* Always send detach because the test is completed at this point */ + fill_get_buf("D"); +} + +static int put_cont_catch(char *put_str, char *arg) +{ + /* This is at the end of the test and we catch any and all input */ + v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id); + ts.idx--; + return 0; +} + +static int emul_reset(char *put_str, char *arg) +{ + if (strncmp(put_str, "$OK", 3)) + return 1; + if (restart_from_top_after_write) { + restart_from_top_after_write = 0; + ts.idx = -1; + } + return 0; +} + static void emul_sstep_get(char *arg) { if (!arch_needs_sstep_emulation) { - fill_get_buf(arg); + if (cont_instead_of_sstep) { + cont_instead_of_sstep = 0; + fill_get_buf("c"); + } else { + fill_get_buf(arg); + } return; } switch (sstep_state) { @@ -397,9 +476,11 @@ static void emul_sstep_get(char *arg) static int emul_sstep_put(char *put_str, char *arg) { if (!arch_needs_sstep_emulation) { - if (!strncmp(put_str+1, arg, 2)) - return 0; - return 1; + char *ptr = &put_str[11]; + if (put_str[1] != 'T' || put_str[2] != '0') + return 1; + kgdb_hex2long(&ptr, &sstep_thread_id); + return 0; } switch (sstep_state) { case 1: @@ -410,8 +491,7 @@ static int emul_sstep_put(char *put_str, char *arg) v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); /* Want to stop at IP + break instruction size by default */ - sstep_addr = instruction_pointer(&kgdbts_regs) + - BREAK_INSTR_SIZE; + sstep_addr = cont_addr + BREAK_INSTR_SIZE; break; case 2: if (strncmp(put_str, "$OK", 3)) { @@ -423,6 +503,9 @@ static int emul_sstep_put(char *put_str, char *arg) if (strncmp(put_str, "$T0", 3)) { eprintk("kgdbts: failed continue sstep\n"); return 1; + } else { + char *ptr = &put_str[11]; + kgdb_hex2long(&ptr, &sstep_thread_id); } break; case 4: @@ -501,10 +584,10 @@ static struct test_struct bad_read_test[] = { static struct test_struct singlestep_break_test[] = { { "?", "S0*" }, /* Clear break points */ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ + { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc }, { "write", "OK", write_regs }, /* Write registers */ - { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "kgdbts_break_test", NULL, check_single_step }, { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */ @@ -522,16 +605,16 @@ static struct test_struct singlestep_break_test[] = { static struct test_struct do_fork_test[] = { { "?", "S0*" }, /* Clear break points */ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ - { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ - { "write", "OK", write_regs }, /* Write registers */ + { "c", "T0*", NULL, get_thread_id_continue 
}, /* Continue */ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "do_fork", NULL, check_single_step }, { "do_fork", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ - { "", "" }, + { "", "", get_cont_catch, put_cont_catch }, }; /* Test for hitting a breakpoint at sys_open for what ever the number @@ -540,16 +623,16 @@ static struct test_struct do_fork_test[] = { static struct test_struct sys_open_test[] = { { "?", "S0*" }, /* Clear break points */ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ - { "c", "T0*", }, /* Continue */ - { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ - { "write", "OK", write_regs }, /* Write registers */ + { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */ + { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */ + { "write", "OK", write_regs, emul_reset }, /* Write registers */ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */ { "g", "sys_open", NULL, check_single_step }, { "sys_open", "OK", sw_break, }, /* set sw breakpoint */ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */ - { "", "" }, + { "", "", get_cont_catch, put_cont_catch }, }; /* @@ -692,8 +775,8 @@ static int run_simple_test(int is_get_char, int chr) /* This callback is a put char which is when kgdb sends data to * this I/O module. */ - if (ts.tst[ts.idx].get[0] == '\0' && - ts.tst[ts.idx].put[0] == '\0') { + if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' && + !ts.tst[ts.idx].get_handler) { eprintk("kgdbts: ERROR: beyond end of test on" " '%s' line %i\n", ts.name, ts.idx); return 0; @@ -906,6 +989,17 @@ static void kgdbts_run_tests(void) if (ptr) sstep_test = simple_strtol(ptr+1, NULL, 10); + /* All HW break point tests */ + if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { + hwbreaks_ok = 1; + v1printk("kgdbts:RUN hw breakpoint test\n"); + run_breakpoint_test(1); + v1printk("kgdbts:RUN hw write breakpoint test\n"); + run_hw_break_test(1); + v1printk("kgdbts:RUN access write breakpoint test\n"); + run_hw_break_test(0); + } + /* required internal KGDB tests */ v1printk("kgdbts:RUN plant and detach test\n"); run_plant_and_detach_test(0); @@ -923,35 +1017,11 @@ static void kgdbts_run_tests(void) /* ===Optional tests=== */ - /* All HW break point tests */ - if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) { - hwbreaks_ok = 1; - v1printk("kgdbts:RUN hw breakpoint test\n"); - run_breakpoint_test(1); - v1printk("kgdbts:RUN hw write breakpoint test\n"); - run_hw_break_test(1); - v1printk("kgdbts:RUN access write breakpoint test\n"); - run_hw_break_test(0); - } - if (nmi_sleep) { v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep); run_nmi_sleep_test(nmi_sleep); } -#ifdef CONFIG_DEBUG_RODATA - /* Until there is an api to write to read-only text segments, use - * HW breakpoints for the remainder of any tests, else print a - * failure message if hw breakpoints do not work. 
- */ - if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) { - eprintk("kgdbts: HW breakpoints do not work," - "skipping remaining tests\n"); - return; - } - force_hwbrks = 1; -#endif /* CONFIG_DEBUG_RODATA */ - /* If the do_fork test is run it will be the last test that is * executed because a kernel thread will be spawned at the very * end to unregister the debug hooks. diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index 97e1a1f18a9..f51c81e1884 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -93,6 +93,7 @@ #define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C #define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040 #define CLKCFG_REG_OFFSET 0x500 +#define FUNCSEL_REG_OFFSET 0x508 #define PCH_PHUB_OROM_SIZE 15360 @@ -111,11 +112,13 @@ * @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val * @int_reduce_control_reg: INT_REDUCE_CONTROL registers val * @clkcfg_reg: CLK CFG register val + * @funcsel_reg: Function select register value * @pch_phub_base_address: Register base address * @pch_phub_extrom_base_address: external rom base address * @pch_mac_start_address: MAC address area start address * @pch_opt_rom_start_address: Option ROM start address * @ioh_type: Save IOH type + * @pdev: pointer to pci device struct */ struct pch_phub_reg { u32 phub_id_reg; @@ -131,11 +134,13 @@ struct pch_phub_reg { u32 intpin_reg_wpermit_reg3; u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG]; u32 clkcfg_reg; + u32 funcsel_reg; void __iomem *pch_phub_base_address; void __iomem *pch_phub_extrom_base_address; u32 pch_mac_start_address; u32 pch_opt_rom_start_address; int ioh_type; + struct pci_dev *pdev; }; /* SROM SPEC for MAC address assignment offset */ @@ -214,6 +219,8 @@ static void pch_phub_save_reg_conf(struct pci_dev *pdev) __func__, i, chip->int_reduce_control_reg[i]); } chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET); } /* pch_phub_restore_reg_conf - restore register configuration */ @@ -274,6 +281,8 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev) } iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET); + if ((chip->ioh_type == 2) || (chip->ioh_type == 4)) + iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET); } /** @@ -494,6 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, unsigned int orom_size; int ret; int err; + ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(container_of(kobj, struct device, kobj)); @@ -505,6 +515,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, } /* Get Rom signature */ + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + goto exrom_map_err; + pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address, (unsigned char *)&rom_signature); rom_signature &= 0xff; @@ -535,10 +549,13 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, goto return_err; } return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); mutex_unlock(&pch_phub_mutex); return addr_offset; return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); +exrom_map_err: mutex_unlock(&pch_phub_mutex); return_err_nomutex: return err; @@ -551,6 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, int err; unsigned int addr_offset; int ret; + ssize_t rom_size; struct pch_phub_reg *chip = 
dev_get_drvdata(container_of(kobj, struct device, kobj)); @@ -567,6 +585,12 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, goto return_ok; } + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) { + err = -ENOMEM; + goto exrom_map_err; + } + for (addr_offset = 0; addr_offset < count; addr_offset++) { if (PCH_PHUB_OROM_SIZE < off + addr_offset) goto return_ok; @@ -581,10 +605,14 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, } return_ok: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); mutex_unlock(&pch_phub_mutex); return addr_offset; return_err: + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); + +exrom_map_err: mutex_unlock(&pch_phub_mutex); return err; } @@ -594,8 +622,14 @@ static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr, { u8 mac[8]; struct pch_phub_reg *chip = dev_get_drvdata(dev); + ssize_t rom_size; + + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; pch_phub_read_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); @@ -605,6 +639,7 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u8 mac[6]; + ssize_t rom_size; struct pch_phub_reg *chip = dev_get_drvdata(dev); if (count != 18) @@ -614,7 +649,12 @@ static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr, (u32 *)&mac[0], (u32 *)&mac[1], (u32 *)&mac[2], (u32 *)&mac[3], (u32 *)&mac[4], (u32 *)&mac[5]); + chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size); + if (!chip->pch_phub_extrom_base_address) + return -ENOMEM; + pch_phub_write_gbe_mac_addr(chip, mac); + pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address); return count; } @@ -637,7 +677,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, int retval; int ret; - ssize_t rom_size; struct pch_phub_reg *chip; chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL); @@ -674,19 +713,7 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, "in pch_phub_base_address variable is %p\n", __func__, chip->pch_phub_base_address); - if (id->driver_data != 3) { - chip->pch_phub_extrom_base_address =\ - pci_map_rom(pdev, &rom_size); - if (chip->pch_phub_extrom_base_address == 0) { - dev_err(&pdev->dev, "%s: pci_map_rom FAILED", __func__); - ret = -ENOMEM; - goto err_pci_map; - } - dev_dbg(&pdev->dev, "%s : " - "pci_map_rom SUCCESS and value in " - "pch_phub_extrom_base_address variable is %p\n", - __func__, chip->pch_phub_extrom_base_address); - } + chip->pdev = pdev; /* Save pci device struct */ if (id->driver_data == 1) { /* EG20T PCH */ retval = sysfs_create_file(&pdev->dev.kobj, @@ -735,6 +762,8 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, * Device8(GbE) */ iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14); + /* set the interrupt delay value */ + iowrite32(0x25, chip->pch_phub_base_address + 0x140); chip->pch_opt_rom_start_address =\ PCH_PHUB_ROM_START_ADDR_ML7223; chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; @@ -752,8 +781,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, * Device6(SATA 2):f */ iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14); - /* set the interrupt delay value */ - 
iowrite32(0x25, chip->pch_phub_base_address + 0x140); chip->pch_opt_rom_start_address =\ PCH_PHUB_ROM_START_ADDR_ML7223; chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223; @@ -783,8 +810,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev, sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); err_sysfs_create: - pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); -err_pci_map: pci_iounmap(pdev, chip->pch_phub_base_address); err_pci_iomap: pci_release_regions(pdev); @@ -802,7 +827,6 @@ static void __devexit pch_phub_remove(struct pci_dev *pdev) sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr); sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr); - pci_unmap_rom(pdev, chip->pch_phub_extrom_base_address); pci_iounmap(pdev, chip->pch_phub_base_address); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 8d082b46426..d971817182f 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -53,6 +53,10 @@ #include #include "xpc.h" +#ifdef CONFIG_X86_64 +#include +#endif + /* define two XPC debug device structures to be used with dev_dbg() et al */ struct device_driver xpc_dbg_name = { @@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) return NOTIFY_DONE; } +/* Used to only allow one cpu to complete disconnect */ +static unsigned int xpc_die_disconnecting; + /* * Notify other partitions to deactivate from us by first disengaging from all * references to our memory. @@ -1092,6 +1099,9 @@ xpc_die_deactivate(void) long keep_waiting; long wait_to_print; + if (cmpxchg(&xpc_die_disconnecting, 0, 1)) + return; + /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; @@ -1159,7 +1169,7 @@ xpc_die_deactivate(void) * about the lack of a heartbeat. */ static int -xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) +xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args) { #ifdef CONFIG_IA64 /* !!! 
temporary kludge */ switch (event) { @@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) break; } #else - xpc_die_deactivate(); + struct die_args *die_args = _die_args; + + switch (event) { + case DIE_TRAP: + if (die_args->trapnr == X86_TRAP_DF) + xpc_die_deactivate(); + + if (((die_args->trapnr == X86_TRAP_MF) || + (die_args->trapnr == X86_TRAP_XF)) && + !user_mode_vm(die_args->regs)) + xpc_die_deactivate(); + + break; + case DIE_INT3: + case DIE_DEBUG: + break; + case DIE_OOPS: + case DIE_GPF: + default: + xpc_die_deactivate(); + } #endif return NOTIFY_DONE; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 17bbacb1b4b..cc2ae7ec0d2 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, - 
XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index a4454c0a574..a26353bff2e 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -246,6 +246,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( goto idata_err; } + if (!idata->buf_bytes) + return idata; + idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; @@ -292,25 +295,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, if (IS_ERR(idata)) return PTR_ERR(idata); - cmd.opcode = idata->ic.opcode; - cmd.arg = idata->ic.arg; - cmd.flags = idata->ic.flags; - - data.sg = &sg; - data.sg_len = 1; - data.blksz = idata->ic.blksz; - data.blocks = idata->ic.blocks; - - sg_init_one(data.sg, idata->buf, idata->buf_bytes); - - if (idata->ic.write_flag) - data.flags = MMC_DATA_WRITE; - else - data.flags = MMC_DATA_READ; - - mrq.cmd = &cmd; - mrq.data = &data; - md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; @@ -323,6 +307,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, goto cmd_done; } + cmd.opcode = idata->ic.opcode; + cmd.arg = idata->ic.arg; + cmd.flags = idata->ic.flags; + + if (idata->buf_bytes) { + data.sg = &sg; + data.sg_len = 1; + data.blksz = idata->ic.blksz; + data.blocks = idata->ic.blocks; + + sg_init_one(data.sg, idata->buf, idata->buf_bytes); + + if (idata->ic.write_flag) + data.flags = MMC_DATA_WRITE; + else + data.flags = MMC_DATA_READ; + + /* data.flags must already be set before doing this. */ + mmc_set_data_timeout(&data, card); + + /* Allow overriding the timeout_ns for empirical tuning. */ + if (idata->ic.data_timeout_ns) + data.timeout_ns = idata->ic.data_timeout_ns; + + if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { + /* + * Pretend this is a data transfer and rely on the + * host driver to compute timeout. When all host + * drivers support cmd.cmd_timeout for R1B, this + * can be changed to: + * + * mrq.data = NULL; + * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; + */ + data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; + } + + mrq.data = &data; + } + + mrq.cmd = &cmd; + mmc_claim_host(card->host); if (idata->ic.is_acmd) { @@ -331,24 +357,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, goto cmd_rel_host; } - /* data.flags must already be set before doing this. */ - mmc_set_data_timeout(&data, card); - /* Allow overriding the timeout_ns for empirical tuning. */ - if (idata->ic.data_timeout_ns) - data.timeout_ns = idata->ic.data_timeout_ns; - - if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { - /* - * Pretend this is a data transfer and rely on the host driver - * to compute timeout. 
When all host drivers support - * cmd.cmd_timeout for R1B, this can be changed to: - * - * mrq.data = NULL; - * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; - */ - data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; - } - mmc_wait_for_req(card->host, &mrq); if (cmd.error) { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c9a43a6212d..e6813212fdc 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -353,13 +353,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; card->ext_csd.raw_trim_mult = ext_csd[EXT_CSD_TRIM_MULT]; + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if (card->ext_csd.rev >= 4) { /* * Enhanced area feature support -- check whether the eMMC * card has the Enhanced area enabled. If so, export enhanced * area offset and size to user by adding sysfs interface. */ - card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { u8 hc_erase_grp_sz = diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index c8dbdbe547f..0fc013a905b 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -306,7 +306,7 @@ static int mmc_read_switch(struct mmc_card *card) goto out; } - if (status[13] & 0x02) + if (status[13] & UHS_SDR50_BUS_SPEED) card->sw_caps.hs_max_dtr = 50000000; if (card->scr.sda_spec3) { diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 1f12ec3dc1a..d91d641f0c5 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -706,7 +706,7 @@ static int mmc_sdio_resume(struct mmc_host *host) } if (!err && host->sdio_irqs) - mmc_signal_sdio_irq(host); + wake_up_process(host->sdio_irq_thread); mmc_release_host(host); /* diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index 8b250c12831..7fe09d47c22 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -27,18 +27,20 @@ #include "sdio_ops.h" -static int process_sdio_pending_irqs(struct mmc_card *card) +static int process_sdio_pending_irqs(struct mmc_host *host) { + struct mmc_card *card = host->card; int i, ret, count; unsigned char pending; struct sdio_func *func; /* * Optimization, if there is only 1 function interrupt registered - * call irq handler directly + * and we know an IRQ was signaled then call irq handler directly. + * Otherwise do the full probe. */ func = card->sdio_single_irq; - if (func) { + if (func && host->sdio_irq_pending) { func->irq_handler(func); return 1; } @@ -115,7 +117,8 @@ static int sdio_irq_thread(void *_host) ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); if (ret) break; - ret = process_sdio_pending_irqs(host->card); + ret = process_sdio_pending_irqs(host); + host->sdio_irq_pending = false; mmc_release_host(host); /* diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index aa8039f473c..b6cd3867f72 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -468,7 +468,14 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot) static inline unsigned int ns_to_clocks(struct atmel_mci *host, unsigned int ns) { - return (ns * (host->bus_hz / 1000000) + 999) / 1000; + /* + * It is easier here to use us instead of ns for the timeout, + * it prevents from overflows during calculation. 
+ */ + unsigned int us = DIV_ROUND_UP(ns, 1000); + + /* Maximum clock frequency is host->bus_hz/2 */ + return us * (DIV_ROUND_UP(host->bus_hz, 2000000)); } static void atmci_set_timeout(struct atmel_mci *host, diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index d513d47364d..74160eb3bf0 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -278,11 +278,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index ba31abee948..6fe8cede417 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -139,8 +139,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) imx_data->scratchpad = val; return; case SDHCI_COMMAND: - if ((host->cmd->opcode == MMC_STOP_TRANSMISSION) - && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || + host->cmd->opcode == MMC_SET_BLOCK_COUNT) && + (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) val |= SDHCI_CMD_ABORTCMD; writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); @@ -244,8 +245,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd } pltfm_host->priv = imx_data; - if (!cpu_is_mx25()) - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; if (cpu_is_mx25() || cpu_is_mx35()) { /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index c3b08f11194..62ca03af8ac 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 936bbca19c0..d3b3115def4 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -140,6 +140,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = { static const struct sdhci_pci_fixes sdhci_cafe = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | + SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 8cd999f4af5..4a5c5012713 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -589,7 +589,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) sdhci_remove_host(host, 1); - for (ptr = 0; ptr < 3; ptr++) { + for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { if (sc->clk_bus[ptr]) { clk_disable(sc->clk_bus[ptr]); clk_put(sc->clk_bus[ptr]); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6d3de0888e7..8bcd5e98d99 100644 --- 
a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1340,8 +1340,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if ((ios->timing == MMC_TIMING_UHS_SDR50) || (ios->timing == MMC_TIMING_UHS_SDR104) || (ios->timing == MMC_TIMING_UHS_DDR50) || - (ios->timing == MMC_TIMING_UHS_SDR25) || - (ios->timing == MMC_TIMING_UHS_SDR12)) + (ios->timing == MMC_TIMING_UHS_SDR25)) ctrl |= SDHCI_CTRL_HISPD; ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); @@ -2227,9 +2226,8 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) /* Disable tuning since we are suspending */ if (host->version >= SDHCI_SPEC_300 && host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1) { + del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); } ret = mmc_suspend_host(host->mmc); @@ -2517,8 +2515,9 @@ int sdhci_add_host(struct sdhci_host *host) mmc_card_is_removable(mmc)) mmc->caps |= MMC_CAP_NEEDS_POLL; - /* UHS-I mode(s) supported by the host controller. */ - if (host->version >= SDHCI_SPEC_300) + /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ + if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50)) mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; /* SDR104 supports also implies SDR50 support */ diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index b78f23169d4..8cd983cdc64 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -284,6 +284,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; dev->mtd.erasesize = erase_size; dev->mtd.writesize = 1; + dev->mtd.writebufsize = PAGE_SIZE; dev->mtd.type = MTD_RAM; dev->mtd.flags = MTD_CAP_RAM; dev->mtd.erase = block2mtd_erase; diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 772a0ff89e0..09d5b5aaea5 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c @@ -636,6 +636,7 @@ static int __init lart_flash_init (void) mtd.name = module_name; mtd.type = MTD_NORFLASH; mtd.writesize = 1; + mtd.writebufsize = 4; mtd.flags = MTD_CAP_NORFLASH; mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; mtd.erasesize = FLASH_BLOCKSIZE_MAIN; diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 35180e475c4..9fad104d4aa 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -930,6 +930,7 @@ static int __devinit m25p_probe(struct spi_device *spi) flash->mtd.dev.parent = &spi->dev; flash->page_size = info->page_size; + flash->mtd.writebufsize = flash->page_size; if (info->addr_width) flash->addr_width = info->addr_width; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index e585263161b..f38c348e00b 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -266,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) if (*(szlength) != '+') { devlength = simple_strtoul(szlength, &buffer, 0); - devlength = handle_unit(devlength, buffer) - devstart; + devlength = handle_unit(devlength, buffer); if (devlength < devstart) goto err_out; diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index 1e2c430aaad..867710a09a4 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c @@ -406,6 +406,7 @@ static int __devinit 
sst25l_probe(struct spi_device *spi) flash->mtd.flags = MTD_CAP_NORFLASH; flash->mtd.erasesize = flash_info->erase_size; flash->mtd.writesize = flash_info->page_size; + flash->mtd.writebufsize = flash_info->page_size; flash->mtd.size = flash_info->page_size * flash_info->nr_pages; flash->mtd.erase = sst25l_erase; flash->mtd.read = sst25l_read; diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c index e5bfd0e093b..0598d52eaf9 100644 --- a/drivers/mtd/maps/autcpu12-nvram.c +++ b/drivers/mtd/maps/autcpu12-nvram.c @@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = { static int __init init_autcpu12_sram (void) { - int err, save0, save1; + map_word tmp, save0, save1; + int err; autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K); if (!autcpu12_sram_map.virt) { @@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void) err = -EIO; goto out; } - simple_map_init(&autcpu_sram_map); + simple_map_init(&autcpu12_sram_map); /* * Check for 32K/128K @@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void) * Read and check result on ofs 0x0 * Restore contents */ - save0 = map_read32(&autcpu12_sram_map,0); - save1 = map_read32(&autcpu12_sram_map,0x10000); - map_write32(&autcpu12_sram_map,~save0,0x10000); + save0 = map_read(&autcpu12_sram_map, 0); + save1 = map_read(&autcpu12_sram_map, 0x10000); + tmp.x[0] = ~save0.x[0]; + map_write(&autcpu12_sram_map, tmp, 0x10000); /* if we find this pattern on 0x0, we have 32K size * restore contents and exit */ - if ( map_read32(&autcpu12_sram_map,0) != save0) { - map_write32(&autcpu12_sram_map,save0,0x0); + tmp = map_read(&autcpu12_sram_map, 0); + if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) { + map_write(&autcpu12_sram_map, save0, 0x0); goto map; } /* We have a 128K found, restore 0x10000 and set size * to 128K */ - map_write32(&autcpu12_sram_map,save1,0x10000); + map_write(&autcpu12_sram_map, save1, 0x10000); autcpu12_sram_map.size = SZ_128K; map: diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index ca385697446..bff8d4671ad 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) mutex_lock(&dev->lock); - if (dev->open++) + if (dev->open) goto unlock; kref_get(&dev->ref); @@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) goto error_release; unlock: + dev->open++; mutex_unlock(&dev->lock); blktrans_dev_put(dev); return ret; diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 9f8658ef946..72b788e738e 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1064,6 +1064,33 @@ static unsigned long mtd_get_unmapped_area(struct file *file, } #endif +static inline unsigned long get_vm_size(struct vm_area_struct *vma) +{ + return vma->vm_end - vma->vm_start; +} + +static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) +{ + return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; +} + +/* + * Set a new vm offset. + * + * Verify that the incoming offset really works as a page offset, + * and that the offset and size fit in a resource_size_t. 
+ */ +static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) +{ + pgoff_t pgoff = off >> PAGE_SHIFT; + if (off != (resource_size_t) pgoff << PAGE_SHIFT) + return -EINVAL; + if (off + get_vm_size(vma) - 1 < off) + return -EINVAL; + vma->vm_pgoff = pgoff; + return 0; +} + /* * set up a mapping for shared memory segments */ @@ -1073,32 +1100,17 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; - unsigned long start; - unsigned long off; - u32 len; - - if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { - off = vma->vm_pgoff << PAGE_SHIFT; - start = map->phys; - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - vma->vm_flags |= VM_IO | VM_RESERVED; + /* This is broken because it assumes the MTD device is map-based + and that mtd->priv is a valid struct map_info. It should be + replaced with something that uses the mtd_get_unmapped_area() + operation properly. */ + if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { #ifdef pgprot_noncached - if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - return 0; + return vm_iomap_memory(vma, map->phys, map->size); } return -ENOSYS; #else diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index e3e40f44032..43130e8acea 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt) size_t retlen; for (page = 0; page < cxt->oops_pages; page++) { + if (mtd->block_isbad && + mtd->block_isbad(mtd, page * record_size)) + continue; /* Assume the page is used */ mark_page_used(cxt, page); ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, @@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd) /* oops_page_used is a bit field */ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, - BITS_PER_LONG)); + BITS_PER_LONG) * sizeof(unsigned long)); if (!cxt->oops_page_used) { printk(KERN_ERR "mtdoops: could not allocate page array\n"); return; diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 87ebb4e5b0c..f5cdc565b3e 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static int cafe_device_ready(struct mtd_info *mtd) { struct cafe_priv *cafe = mtd->priv; - int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000); + int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000); uint32_t irqs = cafe_readl(cafe, NAND_IRQ); cafe_writel(cafe, irqs, NAND_IRQ); diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index ccbeaa1e4a8..c27ca6affa9 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c @@ -360,6 +360,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, buf += mtd->oobsize + mtd->writesize; len -= mtd->writesize; + offs += mtd->writesize; } return 0; } @@ -428,7 +429,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, /* Read the mirror version, 
if available */ if (md && (md->options & NAND_BBT_VERSION)) { scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, - mtd->writesize, td); + mtd->writesize, md); md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]); diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 357e8c5252a..1f2b8803cca 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -547,12 +547,6 @@ static char *get_partition_name(int i) return kstrdup(buf, GFP_KERNEL); } -static uint64_t divide(uint64_t n, uint32_t d) -{ - do_div(n, d); - return n; -} - /* * Initialize the nandsim structure. * @@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd) ns->geom.oobsz = mtd->oobsize; ns->geom.secsz = mtd->erasesize; ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz; - ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz); + ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz); ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz; ns->geom.secshift = ffs(ns->geom.secsz) - 1; ns->geom.pgshift = chip->page_shift; @@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd) if (!rptwear) return 0; - wear_eb_count = divide(mtd->size, mtd->erasesize); + wear_eb_count = div_u64(mtd->size, mtd->erasesize); mem = wear_eb_count * sizeof(unsigned long); if (mem / sizeof(unsigned long) != wear_eb_count) { NS_ERR("Too many erase blocks for wear reporting\n"); @@ -2361,6 +2355,7 @@ static int __init ns_init_module(void) uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize; if (new_size >> overridesize != nsmtd->erasesize) { NS_ERR("overridesize is too big\n"); + retval = -EINVAL; goto err_exit; } /* N.B. 
This relies on nand_scan not doing anything with the size before we change it */ diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 0db2c0e7656..02897077f16 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1139,7 +1139,8 @@ static int omap_nand_remove(struct platform_device *pdev) /* Release NAND device, its internal structures and partitions */ nand_release(&info->mtd); iounmap(info->nand.IO_ADDR_R); - kfree(&info->mtd); + release_mem_region(info->phys_base, NAND_IO_SIZE); + kfree(info); return 0; } diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index ed3d6cd2c6d..0e34d564941 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev) static struct mtd_blktrans_ops sm_ftl_ops = { .name = "smblk", - .major = -1, + .major = 0, .part_bits = SM_FTL_PARTN_BITS, .blksize = SM_SECTOR_SIZE, .getgeo = sm_getgeo, diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 531625fc925..129bad2e408 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c @@ -277,6 +277,12 @@ static int __init mtd_stresstest_init(void) (unsigned long long)mtd->size, mtd->erasesize, pgsize, ebcnt, pgcnt, mtd->oobsize); + if (ebcnt < 2) { + printk(PRINT_PREF "error: need at least 2 eraseblocks\n"); + err = -ENOSPC; + goto out_put_mtd; + } + /* Read or write up 2 eraseblocks at a time */ bufsize = mtd->erasesize * 2; @@ -315,6 +321,7 @@ static int __init mtd_stresstest_init(void) kfree(bbt); vfree(writebuf); vfree(readbuf); +out_put_mtd: put_mtd_device(mtd); if (err) printk(PRINT_PREF "error %d occurred\n", err); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 65626c1c446..2b351d0b1ff 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id) struct ubi_volume *vol = ubi->volumes[vol_id]; int err, old_reserved_pebs = vol->reserved_pebs; + if (ubi->ro_mode) { + ubi_warn("skip auto-resize because of R/O mode"); + return 0; + } + /* * Clear the auto-resize flag in the volume in-memory copy of the * volume table, and 'ubi_resize_volume()' will propagate this change diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 191f3bb3c41..cdea6692dea 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -628,6 +628,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi, if (req->alignment != 1 && n) goto bad; + if (!req->name[0] || !req->name_len) + goto bad; + if (req->name_len > UBI_VOL_NAME_MAX) { err = -ENAMETOOLONG; goto bad; diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 3f1a09c5c43..5f0e4c2d9cd 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -51,7 +51,10 @@ struct ubi_mkvol_req; pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__) /* Just a debugging messages not related to any specific UBI subsystem */ -#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__) +#define dbg_msg(fmt, ...) \ + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ + current->pid, __func__, ##__VA_ARGS__) + /* General debugging messages */ #define dbg_gen(fmt, ...) 
ubi_dbg_msg("gen", fmt, ##__VA_ARGS__) /* Messages from the eraseblock association sub-system */ diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 4be67181501..c696c9481c9 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the * LEB is already locked, we just do not move it and return - * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later. + * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because + * we do not know the reasons of the contention - it may be just a + * normal I/O on this LEB, so we want to re-try. */ err = leb_write_trylock(ubi, vol_id, lnum); if (err) { dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); - return MOVE_CANCEL_RACE; + return MOVE_RETRY; } /* diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 2135a53732f..0b49eadebc3 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) - goto out_slab; + goto out_si; vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vidh) @@ -1235,8 +1235,6 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) ubi_free_vid_hdr(ubi, vidh); out_ech: kfree(ech); -out_slab: - kmem_cache_destroy(si->scan_leb_slab); out_si: ubi_scan_destroy_si(si); return ERR_PTR(err); @@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) } } - kmem_cache_destroy(si->scan_leb_slab); + if (si->scan_leb_slab) + kmem_cache_destroy(si->scan_leb_slab); + kfree(si); } diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c6c22295898..bbfa88d459e 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -121,6 +121,7 @@ enum { * PEB * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the * target PEB + * MOVE_RETRY: retry scrubbing the PEB */ enum { MOVE_CANCEL_RACE = 1, @@ -128,6 +129,7 @@ enum { MOVE_TARGET_RD_ERR, MOVE_TARGET_WR_ERR, MOVE_CANCEL_BITFLIPS, + MOVE_RETRY, }; /** diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index fd3bf770f51..326bd937563 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -356,7 +356,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si, */ err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec, vid_hdr, 0); - kfree(new_seb); + kmem_cache_free(si->scan_leb_slab, new_seb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -369,7 +369,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si, list_add(&new_seb->u.list, &si->erase); goto retry; } - kfree(new_seb); + kmem_cache_free(si->scan_leb_slab, new_seb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index ff2c4956eef..25f18e9b643 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -386,7 +386,7 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) */ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) { - int err, medium_ec; + int err; struct ubi_wl_entry *e, *first, *last; ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || @@ -424,7 +424,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) * For unknown data we pick a physical eraseblock with medium * erase counter. 
But we by no means can pick a physical * eraseblock with erase counter greater or equivalent than the - * lowest erase counter plus %WL_FREE_MAX_DIFF. + * lowest erase counter plus %WL_FREE_MAX_DIFF/2. */ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); @@ -433,10 +433,8 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) if (last->ec - first->ec < WL_FREE_MAX_DIFF) e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); - else { - medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; - e = find_wl_entry(&ubi->free, medium_ec); - } + else + e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); break; case UBI_SHORTTERM: /* @@ -792,7 +790,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, protect = 1; goto out_not_moved; } - + if (err == MOVE_RETRY) { + scrubbing = 1; + goto out_not_moved; + } if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR || err == MOVE_TARGET_RD_ERR) { /* @@ -1046,7 +1047,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ubi_err("failed to erase PEB %d, error %d", pnum, err); kfree(wl_wrk); - kmem_cache_free(ubi_wl_entry_slab, e); if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) { @@ -1059,14 +1059,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, goto out_ro; } return err; - } else if (err != -EIO) { + } + + kmem_cache_free(ubi_wl_entry_slab, e); + if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling * this physical eraseblock for erasure again would cause * errors again and again. Well, lets switch to R/O mode. */ goto out_ro; - } /* It is %-EIO, the PEB went bad */ diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 44b28b2d700..15410f74e35 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev, if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, pdev); netdev_boot_setup_check(dev); if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { @@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device) return -ENOMEM; } + SET_NETDEV_DEV(dev, device); netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 8cc22568ebd..c83be409fc8 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -632,7 +632,6 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. @@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device) unregister_netdev(dev); iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); - release_region(dev->base_addr, VORTEX_TOTAL_SIZE); + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); free_netdev(dev); return 0; @@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + pci_disable_device(pdev); + goto out; + } + unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... 
*/ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { + pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); + pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1180,11 +1187,6 @@ static int __devinit vortex_probe1(struct device *gendev, /* PCI-only startup logic */ if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1222,7 +1224,7 @@ static int __devinit vortex_probe1(struct device *gendev, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_region; + goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1487,9 +1489,7 @@ static int __devinit vortex_probe1(struct device *gendev, + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); +free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: @@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data) ok = 1; } - if (!netif_carrier_ok(dev)) + if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev)) next_tick = 5*HZ; if (vp->medialock) @@ -3253,8 +3253,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); + + pci_release_regions(pdev); + free_netdev(dev); } diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 10c45051cae..a8b82da3956 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -992,6 +992,11 @@ static inline void cp_start_hw (struct cp_private *cp) cpw8(Cmd, RxOn | TxOn); } +static void cp_enable_irq(struct cp_private *cp) +{ + cpw16_f(IntrMask, cp_intr_mask); +} + static void cp_init_hw (struct cp_private *cp) { struct net_device *dev = cp->dev; @@ -1031,8 +1036,6 @@ static void cp_init_hw (struct cp_private *cp) cpw16(MultiIntr, 0); - cpw16_f(IntrMask, cp_intr_mask); - cpw8_f(Cfg9346, Cfg9346_Lock); } @@ -1164,6 +1167,8 @@ static int cp_open (struct net_device *dev) if (rc) goto err_out_hw; + cp_enable_irq(cp); + netif_carrier_off(dev); mii_check_media(&cp->mii_if, netif_msg_link(cp), true); netif_start_queue(dev); @@ -2052,6 +2057,7 @@ static int cp_resume (struct pci_dev *pdev) /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ cp_init_rings_index (cp); cp_init_hw (cp); + cp_enable_irq(cp); netif_start_queue (dev); spin_lock_irqsave (&cp->lock, flags); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8046abac3d8..d62b86fff44 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2543,7 +2543,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI select MII ---help--- @@ -2556,10 +2556,11 @@ config PCH_GBE This driver enables Gigabit Ethernet function. 
This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ - Output Hub), ML7223. - ML7223 IOH is for MP(Media Phone) use. - ML7223 is companion chip for Intel Atom E6xx series. - ML7223 is completely compatible for Intel EG20T PCH. + Output Hub), ML7223/ML7831. + ML7223 IOH is for MP(Media Phone) use. ML7831 IOH is for general + purpose use. + ML7223/ML7831 is companion chip for Intel Atom E6xx series. + ML7223/ML7831 is completely compatible for Intel EG20T PCH. config QFEC tristate "QFEC ethernet driver" diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 1269ba5d6e5..5e34e21f888 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -2223,10 +2223,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, dev_info(&adapter->pdev->dev, "tx locked\n"); return NETDEV_TX_LOCKED; } - if (skb->mark == 0x01) - type = atl1c_trans_high; - else - type = atl1c_trans_normal; if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h index 490d3b38e0c..2cec0dfbbd3 100644 --- a/drivers/net/atl1e/atl1e.h +++ b/drivers/net/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { /* how about 0x2000 */ #define MAX_TX_BUF_LEN 0x2000 #define MAX_TX_BUF_SHIFT 13 -/*#define MAX_TX_BUF_LEN 0x3000 */ +#define MAX_TSO_SEG_SIZE 0x3c00 /* rrs word 1 bit 0:31 */ #define RRS_RX_CSUM_MASK 0xFFFF @@ -439,7 +439,6 @@ struct atl1e_adapter { struct atl1e_hw hw; struct atl1e_hw_stats hw_stats; - bool have_msi; u32 wol; u16 link_speed; u16 link_duplex; diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 86a91228313..8fe1cd3da3a 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -1848,37 +1848,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) struct net_device *netdev = adapter->netdev; free_irq(adapter->pdev->irq, netdev); - - if (adapter->have_msi) - pci_disable_msi(adapter->pdev); } static int atl1e_request_irq(struct atl1e_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; - int flags = 0; int err = 0; - adapter->have_msi = true; - err = pci_enable_msi(adapter->pdev); - if (err) { - netdev_dbg(adapter->netdev, - "Unable to allocate MSI interrupt Error: %d\n", err); - adapter->have_msi = false; - } else - netdev->irq = pdev->irq; - - - if (!adapter->have_msi) - flags |= IRQF_SHARED; - err = request_irq(adapter->pdev->irq, atl1e_intr, flags, - netdev->name, netdev); + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, + netdev->name, netdev); if (err) { netdev_dbg(adapter->netdev, "Unable to allocate interrupt Error: %d\n", err); - if (adapter->have_msi) - pci_disable_msi(adapter->pdev); return err; } netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); @@ -2351,6 +2333,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index cd5789ff372..48c27d34eac 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data) "pcie phy link down %x\n", status); if (netif_running(adapter->netdev)) { /* reset MAC */ iowrite32(0, 
adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); + schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } } @@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data) "pcie DMA r/w error (status = 0x%x)\n", status); iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); + schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } @@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter) atl1_clean_rx_ring(adapter); } -static void atl1_tx_timeout_task(struct work_struct *work) +static void atl1_reset_dev_task(struct work_struct *work) { struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); + container_of(work, struct atl1_adapter, reset_dev_task); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); @@ -3034,12 +3034,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev, (unsigned long)adapter); adapter->phy_timer_pending = false; - INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); + INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); - INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); - err = register_netdev(netdev); if (err) goto err_common; diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h index 68de8cbfb3e..c27b724a834 100644 --- a/drivers/net/atlx/atl1.h +++ b/drivers/net/atlx/atl1.h @@ -759,9 +759,8 @@ struct atl1_adapter { u16 link_speed; u16 link_duplex; spinlock_t lock; - struct work_struct tx_timeout_task; + struct work_struct reset_dev_task; struct work_struct link_chg_task; - struct work_struct pcie_dma_to_rst_task; struct timer_list phy_config_timer; bool phy_timer_pending; diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c index afb7f7dd1bb..2b7af060d49 100644 --- a/drivers/net/atlx/atlx.c +++ b/drivers/net/atlx/atlx.c @@ -193,7 +193,7 @@ static void atlx_tx_timeout(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ - schedule_work(&adapter->tx_timeout_task); + schedule_work(&adapter->reset_dev_task); } /* diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index a485f7fdaf3..2ce5db5e9c6 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -763,6 +763,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); if (copied) { + int gso_segs = skb_shinfo(skb)->gso_segs; + /* record the sent skb in the sent_skb table */ BUG_ON(tx_obj->sent_skb_list[start]); tx_obj->sent_skb_list[start] = skb; @@ -780,8 +782,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, be_txq_notify(adapter, txq->id, wrb_cnt); - be_tx_stats_update(adapter, wrb_cnt, copied, - skb_shinfo(skb)->gso_segs, stopped); + be_tx_stats_update(adapter, wrb_cnt, copied, gso_segs, stopped); } else { txq->head = start; dev_kfree_skb_any(skb); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 74580bb175f..c9b123c1608 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -5310,7 +5310,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) int k, last; if (skb == NULL) { - j++; + j = NEXT_TX_BD(j); continue; } @@ -5322,8 +5322,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp) tx_buf->skb = NULL; last = tx_buf->nr_frags; - j++; - for (k = 0; k < last; k++, j++) { + j = NEXT_TX_BD(j); + for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) { tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 2df9276720a..5e725e07d61 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -871,16 +871,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) } } -/* hw is a boolean parameter that determines whether we should try and - * set the hw address of the device as well as the hw address of the - * net_device - */ -static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw) +static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) { struct net_device *dev = slave->dev; struct sockaddr s_addr; - if (!hw) { + if (slave->bond->params.mode == BOND_MODE_TLB) { memcpy(dev->dev_addr, addr, dev->addr_len); return 0; } @@ -910,8 +906,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct u8 tmp_mac_addr[ETH_ALEN]; memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN); - alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled); - alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr); + alb_set_slave_mac_addr(slave2, tmp_mac_addr); } @@ -1058,8 +1054,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /* Try setting slave mac to bond address and fall-through to code handling that situation below... */ - alb_set_slave_mac_addr(slave, bond->dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave, bond->dev->dev_addr); } /* The slave's address is equal to the address of the bond. @@ -1095,8 +1090,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav } if (free_mac_slave) { - alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr); pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n", bond->dev->name, slave->dev->name, @@ -1452,8 +1446,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) { int res; - res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr, - bond->alb_info.rlb_enabled); + res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr); if (res) { return res; } @@ -1604,8 +1597,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave alb_swap_mac_addr(bond, swap_slave, new_slave); } else { /* set the new_slave to the bond mac address */ - alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); } if (swap_slave) { @@ -1665,8 +1657,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); } else { - alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, - bond->alb_info.rlb_enabled); + alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); read_lock(&bond->lock); alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2065cb4002b..f271e44f797 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -77,6 +77,7 @@ #include #include #include +#include #include "bonding.h" #include "bond_3ad.h" 
#include "bond_alb.h" @@ -388,8 +389,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr) return next; } -#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb)) - /** * bond_dev_queue_xmit - Prepare skb for xmit. * @@ -403,7 +402,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, skb->dev = slave_dev; skb->priority = 1; - skb->queue_mapping = bond_queue_mapping(skb); + BUILD_BUG_ON(sizeof(skb->queue_mapping) != + sizeof(qdisc_skb_cb(skb)->bond_queue_mapping)); + skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping; if (unlikely(netpoll_tx_running(slave_dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); @@ -1438,6 +1439,8 @@ static void bond_compute_features(struct bonding *bond) struct net_device *bond_dev = bond->dev; u32 vlan_features = BOND_VLAN_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; + unsigned int gso_max_size = GSO_MAX_SIZE; + u16 gso_max_segs = GSO_MAX_SEGS; int i; read_lock(&bond->lock); @@ -1451,11 +1454,16 @@ static void bond_compute_features(struct bonding *bond) if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; + + gso_max_size = min(gso_max_size, slave->dev->gso_max_size); + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs); } done: bond_dev->vlan_features = vlan_features; bond_dev->hard_header_len = max_hard_header_len; + bond_dev->gso_max_segs = gso_max_segs; + netif_set_gso_max_size(bond_dev, gso_max_size); read_unlock(&bond->lock); @@ -1905,7 +1913,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) "but new slave device does not support netpoll.\n", bond_dev->name); res = -EBUSY; - goto err_close; + goto err_detach; } } #endif @@ -1914,7 +1922,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_close; + goto err_detach; res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); @@ -1935,7 +1943,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); +err_detach: + write_lock_bh(&bond->lock); + bond_detach_slave(bond, new_slave); + write_unlock_bh(&bond->lock); + err_close: + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_unset_master: @@ -2004,12 +2018,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return -EINVAL; } + write_unlock_bh(&bond->lock); /* unregister rx_handler early so bond_handle_frame wouldn't be called * for this slave anymore. 
*/ netdev_rx_handler_unregister(slave_dev); - write_unlock_bh(&bond->lock); - synchronize_net(); write_lock_bh(&bond->lock); if (!bond->params.fail_over_mac) { @@ -3071,7 +3084,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) trans_start + delta_in_ticks)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; - bond->current_arp_slave = NULL; + if (bond->current_arp_slave) { + bond_set_slave_inactive_flags( + bond->current_arp_slave); + bond->current_arp_slave = NULL; + } pr_info("%s: link status definitely up for interface %s.\n", bond->dev->name, slave->dev->name); @@ -4231,7 +4248,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) /* * Save the original txq to restore before passing to the driver */ - bond_queue_mapping(skb) = skb->queue_mapping; + qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping; if (unlikely(txq >= dev->real_num_tx_queues)) { do { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 06246109538..8a967358a80 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -1524,6 +1524,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, goto out; } + read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) @@ -1532,6 +1533,7 @@ static ssize_t bonding_store_slaves_active(struct device *d, slave->inactive = 1; } } + read_unlock(&bond->lock); out: return ret; } diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 3df0c0f8b8b..82b1802b1d3 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty) sprintf(name, "cf%s", tty->name); dev = alloc_netdev(sizeof(*ser), name, caifdev_setup); + if (!dev) + return -ENOMEM; + ser = netdev_priv(dev); ser->tty = tty_kref_get(tty); ser->dev = dev; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 7e5cc0bd913..61958684ab1 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -592,8 +592,8 @@ static void c_can_chip_config(struct net_device *dev) priv->write_reg(priv, &priv->regs->control, CONTROL_ENABLE_AR); - if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & - CAN_CTRLMODE_LOOPBACK)) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && + (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { /* loopback + silent mode : useful for hot self-test */ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | CONTROL_SIE | CONTROL_IE | CONTROL_TEST); @@ -688,7 +688,7 @@ static int c_can_get_berr_counter(const struct net_device *dev, * * We iterate from priv->tx_echo to priv->tx_next and check if the * packet has been transmitted, echo it back to the CAN framework. - * If we discover a not yet transmitted package, stop looking for more. + * If we discover a not yet transmitted packet, stop looking for more. 
*/ static void c_can_do_tx(struct net_device *dev) { @@ -700,7 +700,7 @@ static void c_can_do_tx(struct net_device *dev) for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { msg_obj_no = get_tx_echo_msg_obj(priv); val = c_can_read_reg32(priv, &priv->regs->txrqst1); - if (!(val & (1 << msg_obj_no))) { + if (!(val & (1 << (msg_obj_no - 1)))) { can_get_echo_skb(dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); stats->tx_bytes += priv->read_reg(priv, @@ -708,6 +708,8 @@ static void c_can_do_tx(struct net_device *dev) & IF_MCONT_DLC_MASK; stats->tx_packets++; c_can_inval_msg_object(dev, 0, msg_obj_no); + } else { + break; } } @@ -914,7 +916,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL); break; case LEC_BIT1_ERROR: @@ -927,7 +929,7 @@ static int c_can_handle_bus_err(struct net_device *dev, break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL); break; default: @@ -952,7 +954,7 @@ static int c_can_poll(struct napi_struct *napi, int quota) struct net_device *dev = napi->dev; struct c_can_priv *priv = netdev_priv(dev); - irqstatus = priv->read_reg(priv, &priv->regs->interrupt); + irqstatus = priv->irqstatus; if (!irqstatus) goto end; @@ -1030,12 +1032,11 @@ static int c_can_poll(struct napi_struct *napi, int quota) static irqreturn_t c_can_isr(int irq, void *dev_id) { - u16 irqstatus; struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); - irqstatus = priv->read_reg(priv, &priv->regs->interrupt); - if (!irqstatus) + priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt); + if (!priv->irqstatus) return IRQ_NONE; /* disable all interrupts and schedule the NAPI */ @@ -1065,10 +1066,11 @@ static int c_can_open(struct net_device *dev) goto exit_irq_fail; } + napi_enable(&priv->napi); + /* start the c_can controller */ c_can_start(dev); - napi_enable(&priv->napi); netif_start_queue(dev); return 0; diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 9b7fbef3d09..5f32d34af50 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -76,6 +76,7 @@ struct c_can_priv { unsigned int tx_next; unsigned int tx_echo; void *priv; /* for board-specific data */ + u16 irqstatus; }; struct net_device *alloc_c_can_dev(void); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index d0f8c7e67e7..cc3ea0d5fd8 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -557,8 +557,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - if (del_timer_sync(&priv->restart_timer)) - dev_put(dev); + del_timer_sync(&priv->restart_timer); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f1942cab35f..b4159a61318 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1249,7 +1249,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id) */ static int ican3_reset_module(struct ican3_dev *mod) { - u8 val = 1 << mod->num; unsigned long start; u8 runold, runnew; @@ -1263,8 +1262,7 @@ static int ican3_reset_module(struct ican3_dev *mod) runold = ioread8(mod->dpm + TARGET_RUNNING); /* reset the module */ - iowrite8(val, &mod->ctrl->reset_assert); - iowrite8(val, 
&mod->ctrl->reset_deassert); + iowrite8(0x00, &mod->dpmctrl->hwreset); /* wait until the module has finished resetting and is running */ start = jiffies; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 330140ee266..9bcc39a07c4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -83,6 +83,11 @@ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94) #define INSTRUCTION_RESET 0xC0 +#define RTS_TXB0 0x01 +#define RTS_TXB1 0x02 +#define RTS_TXB2 0x04 +#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) + /* MPC251x registers */ #define CANSTAT 0x0e @@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN]; @@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); - mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ); + + /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ + priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); + mcp251x_spi_trans(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 5fedc337556..d8f2b5b1b62 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (!clock_name || !strcmp(clock_name, "sys")) { sys_clk = clk_get(&ofdev->dev, "sys_clk"); - if (!sys_clk) { + if (IS_ERR(sys_clk)) { dev_err(&ofdev->dev, "couldn't get sys_clk\n"); goto exit_unmap; } @@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (clocksrc < 0) { ref_clk = clk_get(&ofdev->dev, "ref_clk"); - if (!ref_clk) { + if (IS_ERR(ref_clk)) { dev_err(&ofdev->dev, "couldn't get ref_clk\n"); goto exit_unmap; } diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index d11fbb2b95f..b508a6380f8 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -559,7 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) stats->rx_errors++; break; case PCH_CRC_ERR: - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; priv->can.can_stats.bus_error++; stats->rx_errors++; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index f7bbde9eb2c..10b23947d8f 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -734,12 +734,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } if (err_status & HECC_CANES_CRCE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); - cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; } if (err_status & HECC_CANES_ACKE) { hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); - cf->data[2] |= CAN_ERR_PROT_LOC_ACK | + cf->data[3] |= CAN_ERR_PROT_LOC_ACK | CAN_ERR_PROT_LOC_ACK_DEL; } } @@ -969,12 +969,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ti_hecc_priv *priv 
= netdev_priv(ndev); + unregister_candev(ndev); clk_disable(priv->clk); clk_put(priv->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(priv->base); release_mem_region(res->start, resource_size(res)); - unregister_candev(ndev); free_candev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 3f2e12c3ac1..015b5152b0d 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -971,7 +971,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event, case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; cxgb_redirect(nr->old, nr->new); - cxgb_neigh_update(nr->new->neighbour); + cxgb_neigh_update(dst_get_neighbour(nr->new)); break; } default: @@ -1116,8 +1116,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) struct l2t_entry *e; struct t3c_tid_entry *te; - olddev = old->neighbour->dev; - newdev = new->neighbour->dev; + olddev = dst_get_neighbour(old)->dev; + newdev = dst_get_neighbour(new)->dev; if (!is_offloading(olddev)) return; if (!is_offloading(newdev)) { @@ -1134,7 +1134,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) } /* Add new L2T entry */ - e = t3_l2t_get(tdev, new->neighbour, newdev); + e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev); if (!e) { printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", __func__); diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c index ae47f23ba93..6b67c526c46 100644 --- a/drivers/net/davinci_cpdma.c +++ b/drivers/net/davinci_cpdma.c @@ -849,6 +849,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) next_dma = desc_read(desc, hw_next); chan->head = desc_from_phys(pool, next_dma); + chan->count--; chan->stats.teardown_dequeue++; /* issue callback without locks held */ diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index dcc4a170b0f..e5d0eede044 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) int ret; /* free and bail if we are shutting down */ - if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { + if (unlikely(!netif_running(ndev))) { dev_kfree_skb_any(skb); return; } @@ -1037,7 +1037,9 @@ static void emac_rx_handler(void *token, int len, int status) recycle: ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, skb_tailroom(skb), GFP_KERNEL); - if (WARN_ON(ret < 0)) + + WARN_ON(ret == -ENOMEM); + if (unlikely(ret < 0)) dev_kfree_skb_any(skb); } @@ -1047,7 +1049,7 @@ static void emac_tx_handler(void *token, int len, int status) struct net_device *ndev = skb->dev; if (unlikely(netif_queue_stopped(ndev))) - netif_start_queue(ndev); + netif_wake_queue(ndev); ndev->stats.tx_packets++; ndev->stats.tx_bytes += len; dev_kfree_skb_any(skb); diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c index 7615040df75..f470ab64b09 100644 --- a/drivers/net/davinci_mdio.c +++ b/drivers/net/davinci_mdio.c @@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) __davinci_mdio_reset(data); return -EAGAIN; } + + reg = __raw_readl(®s->user[0].access); + if ((reg & USERACCESS_GO) == 0) + return 0; + dev_err(data->dev, "timed out waiting for user access\n"); return -ETIMEDOUT; } diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 39cf9b9bd67..098ff315694 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -37,6 +37,7 @@ #include #include 
#include +#include static int numdummies = 1; @@ -106,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev) return 0; } -static void dummy_dev_free(struct net_device *dev) +static void dummy_dev_uninit(struct net_device *dev) { free_percpu(dev->dstats); - free_netdev(dev); } static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, + .ndo_uninit = dummy_dev_uninit, .ndo_start_xmit = dummy_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_multicast_list = set_multicast_list, @@ -127,7 +128,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; - dev->destructor = dummy_dev_free; + dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->tx_queue_len = 0; @@ -186,8 +187,10 @@ static int __init dummy_init_module(void) rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); - for (i = 0; i < numdummies && !err; i++) + for (i = 0; i < numdummies && !err; i++) { err = dummy_init_one(); + cond_resched(); + } if (err < 0) __rtnl_link_unregister(&dummy_link_ops); rtnl_unlock(); diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 8676899120c..2c71884eb46 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -150,6 +150,8 @@ struct e1000_buffer { unsigned long time_stamp; u16 length; u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; u16 mapped_as_page; }; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 76e8af00d86..99525f9b41b 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2798,7 +2798,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, struct e1000_buffer *buffer_info; unsigned int len = skb_headlen(skb); unsigned int offset = 0, size, count = 0, i; - unsigned int f; + unsigned int f, bytecount, segs; i = tx_ring->next_to_use; @@ -2899,7 +2899,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter, } } + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; tx_ring->buffer_info[first].next_to_watch = i; return count; @@ -3573,14 +3579,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, cleaned = (i == eop); if (cleaned) { - struct sk_buff *skb = buffer_info->skb; - unsigned int segs, bytecount; - segs = skb_shinfo(skb)->gso_segs ?: 1; - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; - total_tx_packets += segs; - total_tx_bytes += bytecount; + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; } e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 8295f219243..8402d19d3d6 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -1573,6 +1573,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) ctrl = er32(CTRL); status = er32(STATUS); rxcw = er32(RXCW); + /* SYNCH bit and IV bit are sticky */ + udelay(10); + rxcw = er32(RXCW); if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { @@ -1599,10 +1602,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device 
Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. */ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); @@ -2087,7 +2088,8 @@ struct e1000_info e1000_82574_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_CHECK_PHY_HANG - | FLAG2_DISABLE_ASPM_L0S, + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 9549879e66a..8a265f3528d 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -311,6 +311,7 @@ struct e1000_adapter { u32 txd_cmd; bool detect_tx_hung; + bool tx_hang_recheck; u8 tx_timeout_factor; u32 tx_int_delay; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3310c3d477d..4ef255214ed 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -930,6 +930,7 @@ static void e1000_print_hw_hang(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, print_hang_task); + struct net_device *netdev = adapter->netdev; struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int i = tx_ring->next_to_clean; unsigned int eop = tx_ring->buffer_info[i].next_to_watch; @@ -941,6 +942,21 @@ static void e1000_print_hw_hang(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->state)) return; + if (!adapter->tx_hang_recheck && + (adapter->flags2 & FLAG2_DMA_BURST)) { + /* May be block on write-back, flush and detect again + * flush pending descriptor writebacks to memory + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + adapter->tx_hang_recheck = true; + return; + } + /* Real hang detected */ + adapter->tx_hang_recheck = false; + netif_stop_queue(netdev); + e1e_rphy(hw, PHY_STATUS, &phy_status); e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); @@ -1054,10 +1070,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) if (tx_ring->buffer_info[i].time_stamp && time_after(jiffies, tx_ring->buffer_info[i].time_stamp + (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) { + !(er32(STATUS) & E1000_STATUS_TXOFF)) schedule_work(&adapter->print_hang_task); - netif_stop_queue(netdev); - } + else + adapter->tx_hang_recheck = false; } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; @@ -3678,6 +3694,7 @@ static int e1000_open(struct net_device *netdev) e1000_irq_enable(adapter); + adapter->tx_hang_recheck = false; netif_start_queue(netdev); adapter->idle_check = true; @@ -5313,7 +5330,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, */ e1000e_release_hw_control(adapter); - pci_disable_device(pdev); + pci_clear_master(pdev); return 0; } diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 97f46ac4f21..a5d98a348a8 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -4521,11 +4521,13 @@ void igb_update_stats(struct igb_adapter *adapter, bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = 
adapter->rx_ring[i]; - ring->rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } do { start = u64_stats_fetch_begin_bh(&ring->rx_syncp); diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index efe05bb34dd..ddec154f1b6 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -221,7 +221,7 @@ static void sirdev_config_fsm(struct work_struct *work) break; case SIRDEV_STATE_DONGLE_SPEED: - if (dev->dongle_drv->reset) { + if (dev->dongle_drv->set_speed) { ret = dev->dongle_drv->set_speed(dev, fsm->param); if (ret < 0) { fsm->result = ret; diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 8ee661245af..4adce71a266 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -360,6 +360,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_SFP_SF_QP: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82599_CX4: diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index b894b42a741..864403da9cd 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -3181,6 +3181,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 83f197d03d4..f0b0ff36e50 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -129,6 +129,12 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), + board_X540 }, /* required last entry */ {0, } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fa43f2507f4..1ea15776837 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -59,11 +59,14 @@ #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C #define IXGBE_DEV_ID_82599_LS 0x154F +#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A #define IXGBE_DEV_ID_X540T 0x1528 +#define IXGBE_DEV_ID_X540T1 0x1560 /* General Registers */ #define IXGBE_CTRL 0x00000 diff --git a/drivers/net/jme.c b/drivers/net/jme.c index 19738143aa9..1d1ccec6072 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2228,19 +2228,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu) ((new_mtu) < IPV6_MIN_MTU)) return -EINVAL; - if (new_mtu > 4000) { - jme->reg_rxcs &= ~RXCS_FIFOTHNP; - jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; - jme_restart_rx_engine(jme); - } else { - jme->reg_rxcs &= ~RXCS_FIFOTHNP; - jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; - jme_restart_rx_engine(jme); - } netdev->mtu = new_mtu; netdev_update_features(netdev); + jme_restart_rx_engine(jme); jme_reset_link(jme); return 0; diff --git 
a/drivers/net/jme.h b/drivers/net/jme.h index e9aaeca96ab..fff885e9274 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h @@ -734,7 +734,7 @@ enum jme_rxcs_values { RXCS_RETRYCNT_60 = 0x00000F00, RXCS_DEFAULT = RXCS_FIFOTHTP_128T | - RXCS_FIFOTHNP_128QW | + RXCS_FIFOTHNP_16QW | RXCS_DMAREQSZ_128B | RXCS_RETRYGAP_256ns | RXCS_RETRYCNT_32, diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index bcd9ba68c9f..99593f00a43 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -489,7 +489,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) for (; rxfc != 0; rxfc--) { rxh = ks8851_rdreg32(ks, KS_RXFHSR); rxstat = rxh & 0xffff; - rxlen = rxh >> 16; + rxlen = (rxh >> 16) & 0xfff; netif_dbg(ks, rx_status, ks->netdev, "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c index 61631cace91..3eacbb4fff9 100644 --- a/drivers/net/ks8851_mll.c +++ b/drivers/net/ks8851_mll.c @@ -38,7 +38,7 @@ #define DRV_NAME "ks8851_mll" static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; -#define MAX_RECV_FRAMES 32 +#define MAX_RECV_FRAMES 255 #define MAX_BUF_SIZE 2048 #define TX_BUF_SIZE 2000 #define RX_BUF_SIZE 2000 diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 41ea5920c15..95b6664e936 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -5679,7 +5679,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN); } - memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); + memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); interrupt = hw_block_intr(hw); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 4ce9e5f2c06..d0893e4c933 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb, skb_orphan(skb); + /* Before queueing this packet to netif_rx(), + * make sure dst is refcounted. 
+ */ + skb_dst_force(skb); + skb->protocol = eth_type_trans(skb, dev); /* it's OK to use per_cpu_ptr() because BHs are off */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index ab4723d92a6..4c0bdaca2d1 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -193,7 +193,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); + vlan = list_first_or_null_rcu(&port->vlans, + struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -247,7 +248,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) xmit_world: skb->ip_summed = ip_summed; - skb_set_dev(skb, vlan->lowerdev); + skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } @@ -687,7 +688,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto destroy_port; - list_add_tail(&vlan->list, &port->vlans); + list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -713,7 +714,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del(&vlan->list); + list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); } EXPORT_SYMBOL_GPL(macvlan_dellink); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 6696e56e632..023b57e2f76 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -552,6 +552,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, if (unlikely(len < ETH_HLEN)) goto err; + err = -EMSGSIZE; + if (unlikely(count > UIO_MAXIOV)) + goto err; + skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len, noblock, &err); if (!skb) diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 1063093b3af..e8ee2bc30ba 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -814,6 +814,7 @@ static int __init ne_drv_probe(struct platform_device *pdev) dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; } + SET_NETDEV_DEV(dev, &pdev->dev); err = do_ne_probe(dev); if (err) { free_netdev(dev); diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 4840ab7e7f8..8824dd41a5a 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -630,6 +630,7 @@ static int netconsole_netdev_event(struct notifier_block *this, goto done; spin_lock_irqsave(&target_list_lock, flags); +restart: list_for_each_entry(nt, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { @@ -642,21 +643,17 @@ static int netconsole_netdev_event(struct notifier_block *this, case NETDEV_UNREGISTER: /* * rtnl_lock already held + * we might sleep in __netpoll_cleanup() */ - if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); - __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); - dev_put(nt->np.dev); - nt->np.dev = NULL; - netconsole_target_put(nt); - } + spin_unlock_irqrestore(&target_list_lock, flags); + __netpoll_cleanup(&nt->np); + spin_lock_irqsave(&target_list_lock, flags); + dev_put(nt->np.dev); + nt->np.dev = NULL; nt->enabled = 0; stopped = true; - break; + netconsole_target_put(nt); + goto restart; } } netconsole_target_put(nt); diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index c0788a31ff0..78d5b674757 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1288,6 +1288,10 @@ static void 
netxen_mask_aer_correctable(struct netxen_adapter *adapter) struct pci_dev *root = pdev->bus->self; u32 aer_pos; + /* root bus? */ + if (!root) + return; + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index eac3c5ca973..0055daf12d5 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -39,6 +39,9 @@ const char pch_driver_version[] = DRV_VERSION; #define PCI_VENDOR_ID_ROHM 0x10db #define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 +/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 + #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -717,13 +720,6 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter) iowrite32(rdba, &hw->reg->RX_DSC_BASE); iowrite32(rdlen, &hw->reg->RX_DSC_SIZE); iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P); - - /* Enables Receive DMA */ - rxdma = ioread32(&hw->reg->DMA_CTRL); - rxdma |= PCH_GBE_RX_DMA_EN; - iowrite32(rxdma, &hw->reg->DMA_CTRL); - /* Enables Receive */ - iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); } /** @@ -1097,6 +1093,19 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter) spin_unlock_irqrestore(&adapter->stats_lock, flags); } +static void pch_gbe_start_receive(struct pch_gbe_hw *hw) +{ + u32 rxdma; + + /* Enables Receive DMA */ + rxdma = ioread32(&hw->reg->DMA_CTRL); + rxdma |= PCH_GBE_RX_DMA_EN; + iowrite32(rxdma, &hw->reg->DMA_CTRL); + /* Enables Receive */ + iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN); + return; +} + /** * pch_gbe_intr - Interrupt Handler * @irq: Interrupt number @@ -1500,9 +1509,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) - skb->ip_summed = CHECKSUM_NONE; - else skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; napi_gro_receive(&adapter->napi, skb); (*work_done)++; @@ -1701,6 +1710,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; int err; + /* Ensure we have a valid MAC */ + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + pr_err("Error: Invalid MAC address\n"); + return -EINVAL; + } + /* hardware has been reset, we need to reload some things */ pch_gbe_set_multi(netdev); @@ -1717,6 +1732,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter) pch_gbe_alloc_tx_buffers(adapter, tx_ring); pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); adapter->tx_queue_len = netdev->tx_queue_len; + pch_gbe_start_receive(&adapter->hw); mod_timer(&adapter->watchdog_timer, jiffies); @@ -2118,7 +2134,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) /* If no Tx and not enough Rx work done, * exit the polling mode */ - if ((work_done < budget) || !netif_running(netdev)) + if (work_done < budget) poll_end_flag = true; } @@ -2392,9 +2408,14 @@ static int pch_gbe_probe(struct pci_dev *pdev, memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { - dev_err(&pdev->dev, "Invalid MAC Address\n"); - ret = -EIO; - goto err_free_adapter; + /* + * If the MAC is invalid (or just missing), display a warning + * but do not abort setting up the device. pch_gbe_up will + * prevent the interface from being brought up until a valid MAC + * is set. 
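As an aside (this block is an illustration, not part of the diff): the pch_gbe hunks here move MAC validation out of probe and into the bring-up path, so a board with a blank EEPROM still registers a netdev and only fails at open until userspace programs a valid address. A minimal stand-alone sketch of that "warn at probe, refuse at open" split follows; the struct and helpers are invented for the example, and the kernel itself uses is_valid_ether_addr() for the check.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define EINVAL 22

struct fake_netdev {
	unsigned char mac[6];
	bool up;
};

/* Stand-in for is_valid_ether_addr(): reject the all-zero address and
 * any multicast address (low bit of the first octet set).
 */
static bool mac_is_valid(const unsigned char *mac)
{
	static const unsigned char zero[6];

	return memcmp(mac, zero, 6) != 0 && !(mac[0] & 1);
}

/* Probe warns but keeps registering the device, like pch_gbe_probe(). */
static void probe(struct fake_netdev *dev)
{
	if (!mac_is_valid(dev->mac))
		fprintf(stderr, "invalid MAC address, interface disabled\n");
}

/* Open is the gatekeeper, like the check added to pch_gbe_up(). */
static int open_dev(struct fake_netdev *dev)
{
	if (!mac_is_valid(dev->mac))
		return -EINVAL;
	dev->up = true;
	return 0;
}

int main(void)
{
	static const unsigned char good_mac[6] = { 0x00, 0x10, 0xa1, 0x01, 0x02, 0x03 };
	struct fake_netdev dev = { .mac = { 0 }, .up = false };

	probe(&dev);
	printf("open: %d\n", open_dev(&dev));	/* -22 until a MAC is set */

	memcpy(dev.mac, good_mac, sizeof(dev.mac));
	printf("open: %d\n", open_dev(&dev));	/* 0: interface comes up */
	return 0;
}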
+ */ + dev_err(&pdev->dev, "Invalid MAC address, " + "interface disabled.\n"); } setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog, (unsigned long)adapter); @@ -2452,6 +2473,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7831_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, /* required last entry */ {0} }; diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c index 5b5d90a47e2..fb74ef9c81a 100644 --- a/drivers/net/pch_gbe/pch_gbe_param.c +++ b/drivers/net/pch_gbe/pch_gbe_param.c @@ -320,10 +320,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter) pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n"); hw->phy.autoneg_advertised = opt.def; } else { - hw->phy.autoneg_advertised = AutoNeg; - pch_gbe_validate_option( - (int *)(&hw->phy.autoneg_advertised), - &opt, adapter); + int tmp = AutoNeg; + + pch_gbe_validate_option(&tmp, &opt, adapter); + hw->phy.autoneg_advertised = tmp; } } @@ -494,9 +494,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list), .p = fc_list } } }; - hw->mac.fc = FlowControl; - pch_gbe_validate_option((int *)(&hw->mac.fc), - &opt, adapter); + int tmp = FlowControl; + + pch_gbe_validate_option(&tmp, &opt, adapter); + hw->mac.fc = tmp; } pch_gbe_check_copper_options(adapter); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 47c8339a035..2843c90f712 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -241,7 +241,7 @@ MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); static struct platform_driver mdio_ofgpio_driver = { .driver = { - .name = "mdio-gpio", + .name = "mdio-ofgpio", .owner = THIS_MODULE, .of_match_table = mdio_ofgpio_match, }, diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 571779f38d9..7b2f5ee39d3 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1008,7 +1008,6 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) proto = npindex_to_proto[npi]; put_unaligned_be16(proto, pp); - netif_stop_queue(dev); skb_queue_tail(&ppp->file.xq, skb); ppp_xmit_process(ppp); return NETDEV_TX_OK; @@ -1103,6 +1102,8 @@ ppp_xmit_process(struct ppp *ppp) code that we can accept some more. */ if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) netif_wake_queue(ppp->dev); + else + netif_stop_queue(ppp->dev); } ppp_xmit_unlock(ppp); } @@ -2072,14 +2073,22 @@ ppp_mp_reconstruct(struct ppp *ppp) continue; } if (PPP_MP_CB(p)->sequence != seq) { + u32 oldseq; /* Fragment `seq' is missing. If it is after minseq, it might arrive later, so stop here. */ if (seq_after(seq, minseq)) break; /* Fragment `seq' is lost, keep going. */ lost = 1; + oldseq = seq; seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 
minseq + 1: PPP_MP_CB(p)->sequence; + + if (ppp->debug & 1) + netdev_printk(KERN_DEBUG, ppp->dev, + "lost frag %u..%u\n", + oldseq, seq-1); + goto again; } @@ -2124,6 +2133,10 @@ ppp_mp_reconstruct(struct ppp *ppp) struct sk_buff *tmp2; skb_queue_reverse_walk_from_safe(list, p, tmp2) { + if (ppp->debug & 1) + netdev_printk(KERN_DEBUG, ppp->dev, + "discarding frag %u\n", + PPP_MP_CB(p)->sequence); __skb_unlink(p, list); kfree_skb(p); } @@ -2139,6 +2152,17 @@ ppp_mp_reconstruct(struct ppp *ppp) /* If we have discarded any fragments, signal a receive error. */ if (PPP_MP_CB(head)->sequence != ppp->nextseq) { + skb_queue_walk_safe(list, p, tmp) { + if (p == head) + break; + if (ppp->debug & 1) + netdev_printk(KERN_DEBUG, ppp->dev, + "discarding frag %u\n", + PPP_MP_CB(p)->sequence); + __skb_unlink(p, list); + kfree_skb(p); + } + if (ppp->debug & 1) netdev_printk(KERN_DEBUG, ppp->dev, " missed pkts %u..%u\n", diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 2d6fd087b59..fc3b7a5cb5e 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock) po = pppox_sk(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 5f838ef9249..bf67991ef99 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -58,8 +58,12 @@ #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) -#define TX_BUFFS_AVAIL(tp) \ - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ @@ -71,7 +75,7 @@ static const int multicast_filter_limit = 32; #define MAX_READ_REQUEST_SHIFT 12 #define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ -#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -140,82 +144,101 @@ enum rtl_tx_desc_version { RTL_TD_1 = 1, }; -#define _R(NAME,TD,FW) \ - { .name = NAME, .txd_version = TD, .fw_name = FW } +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +#define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ +} static const struct { const char *name; enum rtl_tx_desc_version txd_version; const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; } rtl_chip_infos[] = { /* PCI devices. 
*/ [RTL_GIGA_MAC_VER_01] = - _R("RTL8169", RTL_TD_0, NULL), + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), [RTL_GIGA_MAC_VER_02] = - _R("RTL8169s", RTL_TD_0, NULL), + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), [RTL_GIGA_MAC_VER_03] = - _R("RTL8110s", RTL_TD_0, NULL), + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), [RTL_GIGA_MAC_VER_04] = - _R("RTL8169sb/8110sb", RTL_TD_0, NULL), + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), [RTL_GIGA_MAC_VER_05] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), [RTL_GIGA_MAC_VER_06] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), /* PCI-E devices. */ [RTL_GIGA_MAC_VER_07] = - _R("RTL8102e", RTL_TD_1, NULL), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_08] = - _R("RTL8102e", RTL_TD_1, NULL), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_09] = - _R("RTL8102e", RTL_TD_1, NULL), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_10] = - _R("RTL8101e", RTL_TD_0, NULL), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_11] = - _R("RTL8168b/8111b", RTL_TD_0, NULL), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_12] = - _R("RTL8168b/8111b", RTL_TD_0, NULL), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_13] = - _R("RTL8101e", RTL_TD_0, NULL), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_14] = - _R("RTL8100e", RTL_TD_0, NULL), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_15] = - _R("RTL8100e", RTL_TD_0, NULL), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_16] = - _R("RTL8101e", RTL_TD_0, NULL), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_0, NULL), + _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_18] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_19] = - _R("RTL8168c/8111c", RTL_TD_1, NULL), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_20] = - _R("RTL8168c/8111c", RTL_TD_1, NULL), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_21] = - _R("RTL8168c/8111c", RTL_TD_1, NULL), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_22] = - _R("RTL8168c/8111c", RTL_TD_1, NULL), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_23] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_24] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_25] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), [RTL_GIGA_MAC_VER_26] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), [RTL_GIGA_MAC_VER_27] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), [RTL_GIGA_MAC_VER_28] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), [RTL_GIGA_MAC_VER_29] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1), + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), [RTL_GIGA_MAC_VER_30] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1), + 
_R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), [RTL_GIGA_MAC_VER_31] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), [RTL_GIGA_MAC_VER_32] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1), + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), [RTL_GIGA_MAC_VER_33] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2) + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false) }; #undef _R @@ -280,6 +303,8 @@ enum rtl_registers { Config0 = 0x51, Config1 = 0x52, Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + Config3 = 0x54, Config4 = 0x55, Config5 = 0x56, @@ -388,6 +413,7 @@ enum rtl_register_content { RxOK = 0x0001, /* RxStatusDesc */ + RxBOVF = (1 << 24), RxFOVF = (1 << 23), RxRWT = (1 << 22), RxRES = (1 << 21), @@ -428,7 +454,6 @@ enum rtl_register_content { /* Config1 register p.24 */ LEDS1 = (1 << 7), LEDS0 = (1 << 6), - MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ Speed_down = (1 << 4), MEMMAP = (1 << 3), IOMAP = (1 << 2), @@ -436,14 +461,19 @@ enum rtl_register_content { PMEnable = (1 << 0), /* Power Management Enable */ /* Config2 register p. 25 */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ PCI_Clock_66MHz = 0x01, PCI_Clock_33MHz = 0x00, /* Config3 register p.25 */ MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */ + /* Config5 register p.27 */ BWF = (1 << 6), /* Accept Broadcast wakeup frame */ MWF = (1 << 5), /* Accept Multicast wakeup frame */ @@ -652,6 +682,11 @@ struct rtl8169_private { void (*up)(struct rtl8169_private *); } pll_power_ops; + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_settings)(struct net_device *, struct ethtool_cmd *); void (*phy_reset_enable)(struct rtl8169_private *tp); @@ -666,6 +701,7 @@ struct rtl8169_private { struct mii_if_info mii; struct rtl8169_counters counters; u32 saved_wolopts; + u32 opts1_mask; const struct firmware *fw; #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN); @@ -705,6 +741,21 @@ static int rtl8169_poll(struct napi_struct *napi, int budget); static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + int cap = tp->pcie_cap; + + if (cap) { + u16 ctl; + + pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); + ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; + pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); + } +} + static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) { void __iomem *ioaddr = tp->mmio_addr; @@ -1043,17 +1094,21 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) return value; } -static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { - RTL_W16(IntrMask, 0x0000); + void __iomem *ioaddr = tp->mmio_addr; - RTL_W16(IntrStatus, 0xffff); + RTL_W16(IntrMask, 0x0000); + 
RTL_W16(IntrStatus, tp->intr_event); + RTL_R8(ChipCmd); } -static void rtl8169_asic_down(void __iomem *ioaddr) +static void rtl8169_asic_down(struct rtl8169_private *tp) { + void __iomem *ioaddr = tp->mmio_addr; + RTL_W8(ChipCmd, 0x00); - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); RTL_R16(CPlusCmd); } @@ -1112,7 +1167,7 @@ static void __rtl8169_check_link_status(struct net_device *dev, netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); if (pm) - pm_schedule_suspend(&tp->pci_dev->dev, 100); + pm_schedule_suspend(&tp->pci_dev->dev, 5000); } spin_unlock_irqrestore(&tp->lock, flags); } @@ -1174,7 +1229,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) u16 reg; u8 mask; } cfg[] = { - { WAKE_ANY, Config1, PMEnable }, { WAKE_PHY, Config3, LinkUp }, { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, @@ -1182,16 +1236,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { WAKE_MCAST, Config5, MWF }, { WAKE_ANY, Config5, LanWake } }; + u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); for (i = 0; i < ARRAY_SIZE(cfg); i++) { - u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(cfg[i].reg, options); } + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + RTL_W8(Cfg9346, Cfg9346_Lock); } @@ -1373,9 +1443,15 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) static u32 rtl8169_fix_features(struct net_device *dev, u32 features) { + struct rtl8169_private *tp = netdev_priv(dev); + if (dev->mtu > TD_MSS_MAX) features &= ~NETIF_F_ALL_TSO; + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + return features; } @@ -1418,8 +1494,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) if (opts2 & RxVlanTag) __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); - - desc->opts2 = 0; } static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) @@ -2948,22 +3022,24 @@ static const struct rtl_cfg_info { }; /* Cfg9346_Unlock assumed. */ -static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr, +static unsigned rtl_try_msi(struct rtl8169_private *tp, const struct rtl_cfg_info *cfg) { + void __iomem *ioaddr = tp->mmio_addr; unsigned msi = 0; u8 cfg2; cfg2 = RTL_R8(Config2) & ~MSIEnable; if (cfg->features & RTL_FEATURE_MSI) { - if (pci_enable_msi(pdev)) { - dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. 
Back to INTx.\n"); } else { cfg2 |= MSIEnable; msi = RTL_FEATURE_MSI; } } - RTL_W8(Config2, cfg2); + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); return msi; } @@ -3027,11 +3103,34 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); } +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + static void r810x_pll_power_down(struct rtl8169_private *tp) { if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); + rtl_speed_down(tp); return; } @@ -3123,11 +3222,12 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) rtl_ephy_write(ioaddr, 0x19, 0xff64); if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); + rtl_speed_down(tp); - RTL_W32(RxConfig, RTL_R32(RxConfig) | - AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | + AcceptMulticast | AcceptMyPhys); return; } @@ -3172,8 +3272,8 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) r8168_phy_power_up(tp); } -static void rtl_pll_power_op(struct rtl8169_private *tp, - void (*op)(struct rtl8169_private *)) +static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) { if (op) op(tp); @@ -3181,12 +3281,12 @@ static void rtl_pll_power_op(struct rtl8169_private *tp, static void rtl_pll_power_down(struct rtl8169_private *tp) { - rtl_pll_power_op(tp, tp->pll_power_ops.down); + rtl_generic_op(tp, tp->pll_power_ops.down); } static void rtl_pll_power_up(struct rtl8169_private *tp) { - rtl_pll_power_op(tp, tp->pll_power_ops.up); + rtl_generic_op(tp, tp->pll_power_ops.up); } static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) @@ -3233,6 +3333,149 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) } } +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->jumbo_ops.enable); +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->jumbo_ops.disable); +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = 
tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + pci_write_config_byte(pdev, 0x79, 0x20); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + pci_write_config_byte(pdev, 0x79, 0x50); +} + +static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. 
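As an aside (this block is an illustration, not part of the diff): the jumbo_ops code in the surrounding hunks is a per-chip dispatch table. rtl_init_jumbo_ops() selects enable/disable hooks once from the detected MAC version, rtl8169_change_mtu() later toggles them when the MTU crosses ETH_DATA_LEN, and rtl_generic_op() makes a missing hook a harmless no-op. A small stand-alone sketch of the same pattern, with invented chip names:

#include <stdio.h>

enum chip_version { CHIP_WITH_JUMBO, CHIP_WITHOUT_JUMBO };

struct jumbo_ops {
	void (*enable)(void);
	void (*disable)(void);
};

static void hw_jumbo_enable(void)  { printf("jumbo frames on\n"); }
static void hw_jumbo_disable(void) { printf("jumbo frames off\n"); }

/* Pick the hooks once, based on the chip, like rtl_init_jumbo_ops(). */
static void init_jumbo_ops(struct jumbo_ops *ops, enum chip_version v)
{
	if (v == CHIP_WITH_JUMBO) {
		ops->enable = hw_jumbo_enable;
		ops->disable = hw_jumbo_disable;
	} else {
		/* No jumbo support: leave both hooks NULL. */
		ops->enable = NULL;
		ops->disable = NULL;
	}
}

/* Like rtl_generic_op(): call the hook only if the chip provides one. */
static void generic_op(void (*op)(void))
{
	if (op)
		op();
}

int main(void)
{
	struct jumbo_ops ops;

	init_jumbo_ops(&ops, CHIP_WITH_JUMBO);
	generic_op(ops.enable);		/* MTU raised above 1500 bytes */
	generic_op(ops.disable);	/* MTU back to the standard size */

	init_jumbo_ops(&ops, CHIP_WITHOUT_JUMBO);
	generic_op(ops.enable);		/* silently a no-op */
	return 0;
}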
+ */ + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + static void rtl_hw_reset(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; @@ -3374,6 +3617,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_init_mdio_ops(tp); rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); rtl8169_print_mac_version(tp); @@ -3387,7 +3631,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->features |= RTL_FEATURE_WOL; if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) tp->features |= RTL_FEATURE_WOL; - tp->features |= rtl_try_msi(pdev, ioaddr, cfg); + tp->features |= rtl_try_msi(tp, cfg); RTL_W8(Cfg9346, Cfg9346_Lock); if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) && @@ -3440,6 +3684,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->intr_event = cfg->intr_event; tp->napi_event = cfg->napi_event; + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + init_timer(&tp->timer); tp->timer.data = (unsigned long) dev; tp->timer.function = rtl8169_phy_timer; @@ -3455,6 +3702,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n", rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr, (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko"); + } if (tp->mac_version == RTL_GIGA_MAC_VER_27 || tp->mac_version == RTL_GIGA_MAC_VER_28 || @@ -3473,6 +3726,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; err_out_msi_4: + netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); iounmap(ioaddr); err_out_free_res_3: @@ -3498,6 +3752,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) cancel_delayed_work_sync(&tp->task); + netif_napi_del(&tp->napi); + unregister_netdev(dev); rtl_release_firmware(tp); @@ -3611,7 +3867,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) void __iomem *ioaddr = tp->mmio_addr; /* Disable interrupts */ - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); if (tp->mac_version == RTL_GIGA_MAC_VER_27 || tp->mac_version == RTL_GIGA_MAC_VER_28 || @@ -3779,21 +4035,6 @@ static void rtl_hw_start_8169(struct net_device *dev) RTL_W16(IntrMask, tp->intr_event); } -static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - int cap = tp->pcie_cap; - - if (cap) { - u16 ctl; - - pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl); - ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force; - pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl); - } -} - static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) { u32 csi; @@ -4093,8 +4334,7 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -4276,6 +4516,11 @@ static void rtl_hw_start_8101(struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) { + tp->intr_event &= ~RxFIFOOver; + tp->napi_event &= ~RxFIFOOver; + } + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) { int cap = tp->pcie_cap; @@ -4336,9 +4581,17 @@ static void rtl_hw_start_8101(struct net_device *dev) static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu) + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) return -EINVAL; + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + dev->mtu = new_mtu; netdev_update_features(dev); @@ -4539,7 +4792,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) /* Wait for any pending NAPI task to complete */ napi_disable(&tp->napi); - rtl8169_irq_mask_and_ack(ioaddr); + rtl8169_irq_mask_and_ack(tp); tp->intr_mask = 0xffff; RTL_W16(IntrMask, tp->intr_event); @@ -4698,7 +4951,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 opts[2]; int frags; - if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); goto err_stop_0; } @@ -4746,10 +4999,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, RTL_W8(TxPoll, NPQ); - if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { netif_stop_queue(dev); - smp_rmb(); - if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) netif_wake_queue(dev); } @@ -4849,9 +5102,9 @@ static void rtl8169_tx_interrupt(struct net_device *dev, if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; - smp_wmb(); + smp_mb(); if (netif_queue_stopped(dev) && - (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { netif_wake_queue(dev); } /* @@ -4860,7 +5113,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev, * of start_xmit activity is detected (if it is not detected, * it is slow enough). 
-- FR */ - smp_rmb(); if (tp->cur_tx != dirty_tx) RTL_W8(TxPoll, NPQ); } @@ -4918,7 +5170,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, u32 status; rmb(); - status = le32_to_cpu(desc->opts1); + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; if (status & DescOwn) break; @@ -4934,11 +5186,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev, rtl8169_schedule_work(dev, rtl8169_reset_task); dev->stats.rx_fifo_errors++; } - rtl8169_mark_to_asic(desc, rx_buf_sz); } else { struct sk_buff *skb; dma_addr_t addr = le64_to_cpu(desc->addr); - int pkt_size = (status & 0x00001FFF) - 4; + int pkt_size = (status & 0x00003fff) - 4; /* * The driver does not support incoming fragmented @@ -4948,16 +5199,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (unlikely(rtl8169_fragmented_frame(status))) { dev->stats.rx_dropped++; dev->stats.rx_length_errors++; - rtl8169_mark_to_asic(desc, rx_buf_sz); - continue; + goto release_descriptor; } skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], tp, pkt_size, addr); - rtl8169_mark_to_asic(desc, rx_buf_sz); if (!skb) { dev->stats.rx_dropped++; - continue; + goto release_descriptor; } rtl8169_rx_csum(skb, status); @@ -4971,13 +5220,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev, dev->stats.rx_bytes += pkt_size; dev->stats.rx_packets++; } - - /* Work around for AMD plateform. */ - if ((desc->opts2 & cpu_to_le32(0xfffe000)) && - (tp->mac_version == RTL_GIGA_MAC_VER_05)) { - desc->opts2 = 0; - cur_rx++; - } +release_descriptor: + desc->opts2 = 0; + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); } count = cur_rx - tp->cur_rx; @@ -5001,13 +5247,17 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) */ status = RTL_R16(IntrStatus); while (status && status != 0xffff) { + status &= tp->intr_event; + if (!status) + break; + handled = 1; /* Handle all of the error cases first. These will reset * the chip, so just exit the loop. */ if (unlikely(!netif_running(dev))) { - rtl8169_asic_down(ioaddr); + rtl8169_asic_down(tp); break; } @@ -5015,27 +5265,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) switch (tp->mac_version) { /* Work around for rx fifo overflow */ case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_26: netif_stop_queue(dev); rtl8169_tx_timeout(dev); goto done; - /* Testers needed. */ - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - /* Experimental science. Pktgen proof. 
*/ - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_25: - if (status == RxFIFOOver) - goto done; - break; default: break; } @@ -5130,7 +5362,7 @@ static void rtl8169_down(struct net_device *dev) spin_lock_irq(&tp->lock); - rtl8169_asic_down(ioaddr); + rtl8169_asic_down(tp); /* * At this point device interrupts can not be enabled in any function, * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task, @@ -5376,6 +5608,9 @@ static void rtl_shutdown(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); rtl8169_net_suspend(dev); @@ -5384,13 +5619,16 @@ static void rtl_shutdown(struct pci_dev *pdev) spin_lock_irq(&tp->lock); - rtl8169_asic_down(ioaddr); + rtl8169_asic_down(tp); spin_unlock_irq(&tp->lock); if (system_state == SYSTEM_POWER_OFF) { - /* WoL fails with some 8168 when the receiver is disabled. */ - if (tp->features & RTL_FEATURE_WOL) { + /* WoL fails with 8168b when the receiver is disabled. */ + if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_12 || + tp->mac_version == RTL_GIGA_MAC_VER_17) && + (tp->features & RTL_FEATURE_WOL)) { pci_clear_master(pdev); RTL_W8(ChipCmd, CmdRxEnb); @@ -5401,6 +5639,8 @@ static void rtl_shutdown(struct pci_dev *pdev) pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } + + pm_runtime_put_noidle(d); } static struct pci_driver rtl8169_pci_driver = { diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7d1651bc72d..07e526d63e1 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -651,25 +651,30 @@ static void efx_fini_channels(struct efx_nic *efx) struct efx_channel *channel; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; + struct pci_dev *dev = efx->pci_dev; int rc; EFX_ASSERT_RESET_SERIALISED(efx); BUG_ON(efx->port_enabled); - rc = efx_nic_flush_queues(efx); - if (rc && EFX_WORKAROUND_7803(efx)) { - /* Schedule a reset to recover from the flush failure. The - * descriptor caches reference memory we're about to free, - * but falcon_reconfigure_mac_wrapper() won't reconnect - * the MACs because of the pending reset. */ - netif_err(efx, drv, efx->net_dev, - "Resetting to recover from flush failure\n"); - efx_schedule_reset(efx, RESET_TYPE_ALL); - } else if (rc) { - netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); - } else { - netif_dbg(efx, drv, efx->net_dev, - "successfully flushed all queues\n"); + /* Only perform flush if dma is enabled */ + if (dev->is_busmaster) { + rc = efx_nic_flush_queues(efx); + + if (rc && EFX_WORKAROUND_7803(efx)) { + /* Schedule a reset to recover from the flush failure. The + * descriptor caches reference memory we're about to free, + * but falcon_reconfigure_mac_wrapper() won't reconnect + * the MACs because of the pending reset. 
*/ + netif_err(efx, drv, efx->net_dev, + "Resetting to recover from flush failure\n"); + efx_schedule_reset(efx, RESET_TYPE_ALL); + } else if (rc) { + netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); + } else { + netif_dbg(efx, drv, efx->net_dev, + "successfully flushed all queues\n"); + } } efx_for_each_channel(channel, efx) { @@ -715,6 +720,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) unsigned i; int rc; + efx_device_detach_sync(efx); efx_stop_all(efx); efx_fini_channels(efx); @@ -758,6 +764,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) efx_init_channels(efx); efx_start_all(efx); + netif_device_attach(efx->net_dev); return rc; rollback: @@ -1383,6 +1390,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_channels(efx); if (rc) @@ -1520,8 +1532,12 @@ static void efx_stop_all(struct efx_nic *efx) /* Flush efx_mac_work(), refill_workqueue, monitor_work */ efx_flush_all(efx); - /* Stop the kernel transmit interface late, so the watchdog - * timer isn't ticking over the flush */ + /* Stop the kernel transmit interface. This is only valid if + * the device is stopped or detached; otherwise the watchdog + * may fire immediately. + */ + WARN_ON(netif_running(efx->net_dev) && + netif_device_present(efx->net_dev)); if (efx_dev_registered(efx)) { netif_tx_stop_all_queues(efx->net_dev); netif_tx_lock_bh(efx->net_dev); @@ -1791,10 +1807,11 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) if (new_mtu > EFX_MAX_MTU) return -EINVAL; - efx_stop_all(efx); - netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); + efx_device_detach_sync(efx); + efx_stop_all(efx); + efx_fini_channels(efx); mutex_lock(&efx->mac_lock); @@ -1807,6 +1824,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) efx_init_channels(efx); efx_start_all(efx); + netif_device_attach(efx->net_dev); return rc; } @@ -1942,6 +1960,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; /* Clear MAC statistics */ efx->mac_op->update_stats(efx); @@ -2095,7 +2114,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", RESET_TYPE(method)); - netif_device_detach(efx->net_dev); + efx_device_detach_sync(efx); efx_reset_down(efx, method); rc = efx->type->reset(efx, method); @@ -2554,7 +2573,7 @@ static int efx_pm_freeze(struct device *dev) efx->state = STATE_FINI; - netif_device_detach(efx->net_dev); + efx_device_detach_sync(efx); efx_stop_all(efx); efx_fini_channels(efx); diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index b0d1209ea18..76e891e6062 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -38,6 +38,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -60,10 +61,15 @@ extern void 
efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. + */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); @@ -144,4 +150,17 @@ extern void efx_link_status_changed(struct efx_nic *efx); extern void efx_link_set_advertising(struct efx_nic *efx, u32); extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); +static inline void efx_device_detach_sync(struct efx_nic *efx) +{ + struct net_device *dev = efx->net_dev; + + /* Lock/freeze all TX queues so that we can be sure the + * TX scheduler is stopped when we're done and before + * netif_device_present() becomes false. + */ + netif_tx_lock_bh(dev); + netif_device_detach(dev); + netif_tx_unlock_bh(dev); +} + #endif /* EFX_EFX_H */ diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index d229027dc36..cfaf801c66a 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -677,21 +677,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 60176e873d6..19b996b33d4 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1714,6 +1714,7 @@ const struct efx_nic_type falcon_a1_nic_type = { .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, + .finish_flush = efx_port_dummy_op_void, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, @@ -1755,6 +1756,7 @@ const struct efx_nic_type falcon_b0_nic_type = { .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, + .finish_flush = efx_port_dummy_op_void, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, diff --git a/drivers/net/sfc/filter.c 
b/drivers/net/sfc/filter.c index 95a980fd63d..08addc93ab0 100644 --- a/drivers/net/sfc/filter.c +++ b/drivers/net/sfc/filter.c @@ -335,28 +335,35 @@ static int efx_filter_search(struct efx_filter_table *table, bool for_insert, int *depth_required) { unsigned hash, incr, filter_idx, depth, depth_max; - struct efx_filter_spec *cmp; hash = efx_filter_hash(key); incr = efx_filter_increment(key); - depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ? - FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX); - - for (depth = 1, filter_idx = hash & (table->size - 1); - depth <= depth_max && test_bit(filter_idx, table->used_bitmap); - ++depth) { - cmp = &table->spec[filter_idx]; - if (efx_filter_equal(spec, cmp)) - goto found; + + filter_idx = hash & (table->size - 1); + depth = 1; + depth_max = (for_insert ? + (spec->priority <= EFX_FILTER_PRI_HINT ? + FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) : + table->search_depth[spec->type]); + + for (;;) { + /* Return success if entry is used and matches this spec + * or entry is unused and we are trying to insert. + */ + if (test_bit(filter_idx, table->used_bitmap) ? + efx_filter_equal(spec, &table->spec[filter_idx]) : + for_insert) { + *depth_required = depth; + return filter_idx; + } + + /* Return failure if we reached the maximum search depth */ + if (depth == depth_max) + return for_insert ? -EBUSY : -ENOENT; + filter_idx = (filter_idx + incr) & (table->size - 1); + ++depth; } - if (!for_insert) - return -ENOENT; - if (depth > depth_max) - return -EBUSY; -found: - *depth_required = depth; - return filter_idx; } /* Construct/deconstruct external filter IDs */ diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index 81a42539746..c1000ce549e 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -30,7 +30,7 @@ #define REBOOT_FLAG_PORT0 0x3f8 #define REBOOT_FLAG_PORT1 0x3fc -#define MCDI_RPC_TIMEOUT 10 /*seconds */ +#define MCDI_RPC_TIMEOUT (10 * HZ) #define MCDI_PDU(efx) \ (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) @@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int time, finish; + unsigned long time, finish; unsigned int respseq, respcmd, error; unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int rc, spins; @@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) * and poll once a jiffy (approximately) */ spins = TICK_USEC; - finish = get_seconds() + MCDI_RPC_TIMEOUT; + finish = jiffies + MCDI_RPC_TIMEOUT; while (1) { if (spins != 0) { @@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) schedule_timeout_uninterruptible(1); } - time = get_seconds(); + time = jiffies; rmb(); efx_readd(efx, &reg, pdu); @@ -158,7 +158,7 @@ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) break; - if (time >= finish) + if (time_after(time, finish)) return -ETIMEDOUT; } @@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx) if (wait_event_timeout( mcdi->wq, atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, - msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) + MCDI_RPC_TIMEOUT) == 0) return -ETIMEDOUT; /* Check if efx_mcdi_set_mode() switched us back to polled completions.
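As an aside (this block is an illustration, not part of the diff): the mcdi.c hunks above move the MCDI timeout from wall-clock seconds to jiffies and compare deadlines with time_after(), which remains correct when the jiffies counter wraps around. The sketch below shows the same wraparound-safe comparison in stand-alone C with invented names; the kernel's time_after() uses the identical signed-difference trick.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is after b": cast the unsigned difference to a
 * signed type, so timestamps that have wrapped past the counter's
 * maximum still order correctly, provided the two values are less
 * than half the counter range apart.
 */
static bool tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t deadline = UINT32_MAX - 5;	/* just before the wrap */
	uint32_t now = 10;			/* counter has wrapped */

	/* A naive "now >= deadline" is false here even though the
	 * deadline passed 16 ticks ago; the signed difference gets it
	 * right.
	 */
	printf("expired: %s\n", tick_after(now, deadline) ? "yes" : "no");
	return 0;
}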
@@ -666,9 +666,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list) { uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; - size_t outlen; + size_t outlen, offset, i; int port_num = efx_port_num(efx); - int offset; int rc; BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); @@ -688,10 +687,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; if (mac_address) memcpy(mac_address, outbuf + offset, ETH_ALEN); - if (fw_subtype_list) - memcpy(fw_subtype_list, - outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); + if (fw_subtype_list) { + offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST; + for (i = 0; + i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 2; + i++) { + fw_subtype_list[i] = + le16_to_cpup((__le16 *)(outbuf + offset)); + offset += 2; + } + } return 0; diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h index aced2a7856f..b61eea04616 100644 --- a/drivers/net/sfc/mcdi.h +++ b/drivers/net/sfc/mcdi.h @@ -126,5 +126,6 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); +extern int efx_mcdi_set_mac(struct efx_nic *efx); #endif /* EFX_MCDI_H */ diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c index 50c20777a56..da269d7076c 100644 --- a/drivers/net/sfc/mcdi_mac.c +++ b/drivers/net/sfc/mcdi_mac.c @@ -13,7 +13,7 @@ #include "mcdi.h" #include "mcdi_pcol.h" -static int efx_mcdi_set_mac(struct efx_nic *efx) +int efx_mcdi_set_mac(struct efx_nic *efx) { u32 reject, fcntl; u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN]; @@ -45,6 +45,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx) } if (efx->wanted_fc & EFX_FC_AUTO) fcntl = MC_CMD_FCNTL_AUTO; + if (efx->fc_disable) + fcntl = MC_CMD_FCNTL_OFF; MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl); diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index e8d5f03a89f..2f932c594bf 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -214,6 +214,7 @@ struct efx_tx_queue { * If both this and page are %NULL, the buffer slot is currently free. * @page: The associated page buffer, if any. * If both this and skb are %NULL, the buffer slot is currently free. + * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. * @len: Buffer length, in bytes. * @is_page: Indicates if @page is valid. If false, @skb is valid. */ @@ -223,7 +224,8 @@ struct efx_rx_buffer { struct sk_buff *skb; struct page *page; } u; - unsigned int len; + u16 page_offset; + u16 len; bool is_page; }; @@ -690,6 +692,9 @@ struct efx_filter_state; * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. * @multicast_hash: Multicast hash table * @wanted_fc: Wanted flow control flags + * @fc_disable: When non-zero flow control is disabled. Typically used to + * ensure that network back pressure doesn't delay dma queue flushes. + * Serialised by the rtnl lock. 
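As an aside (this block is an illustration, not part of the diff): the fc_disable field documented above is a nesting counter. siena_prepare_flush() and siena_finish_flush(), later in this patch, only call efx_mcdi_set_mac() on the 0-to-1 and 1-to-0 transitions, so nested disable/enable pairs compose without extra MCDI traffic. A minimal sketch of the pattern with invented names; as in the driver, the caller is assumed to serialise these calls (there, via the rtnl lock).

#include <stdio.h>

struct flow_ctrl {
	unsigned int disable_count;	/* like efx->fc_disable */
};

static void apply_flow_control(struct flow_ctrl *fc)
{
	/* Stand-in for efx_mcdi_set_mac(): push the current state. */
	printf("flow control %s\n", fc->disable_count ? "off" : "on");
}

/* Only the first disable touches the hardware. */
static void flow_control_disable(struct flow_ctrl *fc)
{
	if (fc->disable_count++ == 0)
		apply_flow_control(fc);
}

/* Only the last enable touches the hardware. */
static void flow_control_enable(struct flow_ctrl *fc)
{
	if (--fc->disable_count == 0)
		apply_flow_control(fc);
}

int main(void)
{
	struct flow_ctrl fc = { 0 };

	flow_control_disable(&fc);	/* prints "flow control off" */
	flow_control_disable(&fc);	/* nested: no hardware write */
	flow_control_enable(&fc);	/* nested: no hardware write */
	flow_control_enable(&fc);	/* prints "flow control on" */
	return 0;
}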
* @mac_work: Work item for changing MAC promiscuity and multicast hash * @loopback_mode: Loopback status * @loopback_modes: Supported loopback mode bitmask @@ -783,6 +788,7 @@ struct efx_nic { bool promiscuous; union efx_multicast_hash multicast_hash; u8 wanted_fc; + unsigned fc_disable; atomic_t rx_reset; enum efx_loopback_mode loopback_mode; @@ -834,6 +840,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) * @remove_port: Free resources allocated by probe_port() * @handle_global_event: Handle a "global" event (may be %NULL) * @prepare_flush: Prepare the hardware for flushing the DMA queues + * @finish_flush: Clean up after flushing the DMA queues * @update_stats: Update statistics not provided by event handling * @start_stats: Start the regular fetching of statistics * @stop_stats: Stop the regular fetching of statistics @@ -879,6 +886,7 @@ struct efx_nic_type { void (*remove_port)(struct efx_nic *efx); bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); void (*prepare_flush)(struct efx_nic *efx); + void (*finish_flush)(struct efx_nic *efx); void (*update_stats)(struct efx_nic *efx); void (*start_stats)(struct efx_nic *efx); void (*stop_stats)(struct efx_nic *efx); diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 5ac9fa2cd3b..49490047892 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -370,7 +370,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) return false; tx_queue->empty_read_count = 0; - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 + && tx_queue->write_count - write_count == 1; } /* For each entry inserted into the software descriptor ring, create a @@ -1260,13 +1261,27 @@ int efx_nic_flush_queues(struct efx_nic *efx) } efx_for_each_possible_channel_tx_queue(tx_queue, channel) { if (tx_queue->initialised && - tx_queue->flushed != FLUSH_DONE) - ++tx_pending; + tx_queue->flushed != FLUSH_DONE) { + efx_oword_t txd_ptr_tbl; + + efx_reado_table(efx, &txd_ptr_tbl, + FR_BZ_TX_DESC_PTR_TBL, + tx_queue->queue); + if (EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_FLUSH) || + EFX_OWORD_FIELD(txd_ptr_tbl, + FRF_AZ_TX_DESCQ_EN)) + ++tx_pending; + else + tx_queue->flushed = FLUSH_DONE; + } } } - if (rx_pending == 0 && tx_pending == 0) + if (rx_pending == 0 && tx_pending == 0) { + efx->type->finish_flush(efx); return 0; + } msleep(EFX_FLUSH_INTERVAL); efx_poll_flush_events(efx); @@ -1292,6 +1307,7 @@ int efx_nic_flush_queues(struct efx_nic *efx) } } + efx->type->finish_flush(efx); return -ETIMEDOUT; } diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index 7443f99c977..8a2c4f5aa29 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -65,6 +65,11 @@ enum { #define FALCON_GMAC_LOOPBACKS \ (1 << LOOPBACK_GMAC) +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + /** * struct falcon_board_type - board operations and type information * @id: Board type id, as found in NVRAM @@ -206,6 +211,8 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx); /* Global Resources */ extern int efx_nic_flush_queues(struct efx_nic *efx); +extern void siena_prepare_flush(struct efx_nic *efx); +extern void siena_finish_flush(struct efx_nic *efx); extern void falcon_start_nic_stats(struct efx_nic *efx); extern void falcon_stop_nic_stats(struct efx_nic *efx); extern void 
falcon_setup_xaui(struct efx_nic *efx); diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 62e43649466..d429f0aadb0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -94,11 +94,7 @@ static unsigned int rx_refill_limit = 95; static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, struct efx_rx_buffer *buf) { - /* Offset is always within one page, so we don't need to consider - * the page order. - */ - return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) + - efx->type->rx_buffer_hash_size); + return buf->page_offset + efx->type->rx_buffer_hash_size; } static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) { @@ -155,11 +151,10 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) if (unlikely(!skb)) return -ENOMEM; - /* Adjust the SKB for padding and checksum */ + /* Adjust the SKB for padding */ skb_reserve(skb, NET_IP_ALIGN); rx_buf->len = skb_len - NET_IP_ALIGN; rx_buf->is_page = false; - skb->ip_summed = CHECKSUM_UNNECESSARY; rx_buf->dma_addr = pci_map_single(efx->pci_dev, skb->data, rx_buf->len, @@ -194,6 +189,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) struct efx_rx_buffer *rx_buf; struct page *page; void *page_addr; + unsigned int page_offset; struct efx_rx_page_state *state; dma_addr_t dma_addr; unsigned index, count; @@ -220,12 +216,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) page_addr += sizeof(struct efx_rx_page_state); dma_addr += sizeof(struct efx_rx_page_state); + page_offset = sizeof(struct efx_rx_page_state); split: index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; rx_buf->u.page = page; + rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; rx_buf->is_page = true; ++rx_queue->added_count; @@ -237,6 +235,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) get_page(page); dma_addr += (PAGE_SIZE >> 1); page_addr += (PAGE_SIZE >> 1); + page_offset += (PAGE_SIZE >> 1); ++count; goto split; } @@ -246,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) } static void efx_unmap_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf) + struct efx_rx_buffer *rx_buf, + unsigned int used_len) { if (rx_buf->is_page && rx_buf->u.page) { struct efx_rx_page_state *state; @@ -257,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, state->dma_addr, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); + } else if (used_len) { + dma_sync_single_for_cpu(&efx->pci_dev->dev, + rx_buf->dma_addr, used_len, + DMA_FROM_DEVICE); } } else if (!rx_buf->is_page && rx_buf->u.skb) { pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, @@ -279,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx, static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf) { - efx_unmap_rx_buffer(rx_queue->efx, rx_buf); + efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); efx_free_rx_buffer(rx_queue->efx, rx_buf); } @@ -498,6 +502,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel, EFX_BUG_ON_PARANOID(!checksummed); rx_buf->u.skb = NULL; + skb->ip_summed = CHECKSUM_UNNECESSARY; gro_result = napi_gro_receive(napi, skb); } @@ -549,10 +554,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, goto out; } - /* Release card resources - assumes all RX buffers consumed in-order - * per RX queue + /* Release and/or sync DMA mapping - 
assumes all RX buffers + * consumed in-order per RX queue */ - efx_unmap_rx_buffer(efx, rx_buf); + efx_unmap_rx_buffer(efx, rx_buf, len); /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 822f6c2a6a7..49078858aaa 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c @@ -698,7 +698,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, /* Detach the device so the kernel doesn't transmit during the * loopback test and the watchdog timeout doesn't fire. */ - netif_device_detach(efx->net_dev); + efx_device_detach_sync(efx); mutex_lock(&efx->mac_lock); if (efx->loopback_modes) { diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index ceac1c9907f..062494a5097 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -135,6 +135,18 @@ static void siena_remove_port(struct efx_nic *efx) efx_nic_free_buffer(efx, &efx->stats_buffer); } +void siena_prepare_flush(struct efx_nic *efx) +{ + if (efx->fc_disable++ == 0) + efx_mcdi_set_mac(efx); +} + +void siena_finish_flush(struct efx_nic *efx) +{ + if (--efx->fc_disable == 0) + efx_mcdi_set_mac(efx); +} + static const struct efx_nic_register_test siena_register_tests[] = { { FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, @@ -372,14 +384,13 @@ static void siena_remove_nic(struct efx_nic *efx) efx->nic_data = NULL; } -#define STATS_GENERATION_INVALID ((u64)(-1)) +#define STATS_GENERATION_INVALID ((__force __le64)(-1)) static int siena_try_update_nic_stats(struct efx_nic *efx) { - u64 *dma_stats; + __le64 *dma_stats; struct efx_mac_stats *mac_stats; - u64 generation_start; - u64 generation_end; + __le64 generation_start, generation_end; mac_stats = &efx->mac_stats; dma_stats = (u64 *)efx->stats_buffer.addr; @@ -390,7 +401,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) rmb(); #define MAC_STAT(M, D) \ - mac_stats->M = dma_stats[MC_CMD_MAC_ ## D] + mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D]) MAC_STAT(tx_bytes, TX_BYTES); MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); @@ -460,7 +471,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); mac_stats->rx_good_lt64 = 0; - efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]; + efx->n_rx_nodesc_drop_cnt = + le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]); #undef MAC_STAT @@ -489,7 +501,7 @@ static void siena_update_nic_stats(struct efx_nic *efx) static void siena_start_nic_stats(struct efx_nic *efx) { - u64 *dma_stats = (u64 *)efx->stats_buffer.addr; + __le64 *dma_stats = efx->stats_buffer.addr; dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; @@ -590,7 +602,8 @@ const struct efx_nic_type siena_a0_nic_type = { .reset = siena_reset_hw, .probe_port = siena_probe_port, .remove_port = siena_remove_port, - .prepare_flush = efx_port_dummy_op_void, + .prepare_flush = siena_prepare_flush, + .finish_flush = siena_finish_flush, .update_stats = siena_update_nic_stats, .start_stats = siena_start_nic_stats, .stop_stats = siena_stop_nic_stats, diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 84eb99e0f8d..6d3b68a8478 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -115,6 +115,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one 
for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/drivers/net/skge.c b/drivers/net/skge.c index f4be5c78ebf..b446e7e5f43 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -4097,6 +4097,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = { DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, {} }; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 3ee41da130c..7f7aae2f6d4 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -94,6 +94,10 @@ static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); +static int legacy_pme = 0; +module_param(legacy_pme, int, 0); +MODULE_PARM_DESC(legacy_pme, "Legacy power management"); + static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ @@ -794,6 +798,13 @@ static void sky2_wol_init(struct sky2_port *sky2) /* Disable PiG firmware */ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); + /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ + if (legacy_pme) { + u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + reg1 |= PCI_Y2_PME_LEGACY; + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + } + /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); } @@ -981,7 +992,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); - tp = space - 2048/8; + tp = space - 8192/8; sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); } else { @@ -2333,8 +2344,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; + skb->rxhash = re->skb->rxhash; + skb->vlan_tci = re->skb->vlan_tci; + pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); + re->skb->vlan_tci = 0; + re->skb->rxhash = 0; re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); } @@ -2419,9 +2435,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev, struct sk_buff *skb = NULL; u16 count = (status & GMR_FS_LEN) >> 16; - if (status & GMR_FS_VLAN) - count -= VLAN_HLEN; /* Account for vlan tag */ - netif_printk(sky2, rx_status, KERN_DEBUG, dev, "rx slot %u status 0x%x len %d\n", sky2->rx_next, status, length); @@ -2429,6 +2442,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev, sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); + if (vlan_tx_tag_present(re->skb)) + count -= VLAN_HLEN; /* Account for vlan tag */ + /* This chip has hardware problems that generates bogus status. 
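For concreteness, a worked example of the efx_tx_max_skb_descs() budget introduced in the sfc tx.c hunk above (the constants are assumptions for illustration, not values quoted from this patch): taking EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 18, the base worst case is 2 * 100 + 18 = 218 descriptors for a single TSO skb; the 5391 alignment workaround can add one more descriptor per segment, raising it to 318; and the PCIe page-boundary term only applies when PAGE_SIZE exceeds EFX_PAGE_SIZE (4 KiB), so it contributes nothing on systems with 4 KiB pages.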
* So do only marginal checking and expect higher level protocols * to handle crap frames. @@ -2486,11 +2502,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) } static inline void sky2_skb_rx(const struct sky2_port *sky2, - u32 status, struct sk_buff *skb) + struct sk_buff *skb) { - if (status & GMR_FS_VLAN) - __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); - if (skb->ip_summed == CHECKSUM_NONE) netif_receive_skb(skb); else @@ -2544,6 +2557,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) } } +static void sky2_rx_tag(struct sky2_port *sky2, u16 length) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + __vlan_hwaccel_put_tag(skb, be16_to_cpu(length)); +} + static void sky2_rx_hash(struct sky2_port *sky2, u32 status) { struct sk_buff *skb; @@ -2602,8 +2623,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) } skb->protocol = eth_type_trans(skb, dev); - - sky2_skb_rx(sky2, status, skb); + sky2_skb_rx(sky2, skb); /* Stop after net poll weight */ if (++work_done >= to_do) @@ -2611,11 +2631,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) break; case OP_RXVLAN: - sky2->rx_tag = length; + sky2_rx_tag(sky2, length); break; case OP_RXCHKSVLAN: - sky2->rx_tag = length; + sky2_rx_tag(sky2, length); /* fall through */ case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) @@ -2909,8 +2929,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) /* Reading this mask interrupts as side effect */ status = sky2_read32(hw, B0_Y2_SP_ISRC2); - if (status == 0 || status == ~0) + if (status == 0 || status == ~0) { + sky2_write32(hw, B0_Y2_SP_ICR, 2); return IRQ_NONE; + } prefetch(&hw->st_le[hw->st_idx]); @@ -4186,10 +4208,12 @@ static int sky2_set_features(struct net_device *dev, u32 features) struct sky2_port *sky2 = netdev_priv(dev); u32 changed = dev->features ^ features; - if (changed & NETIF_F_RXCSUM) { - u32 on = features & NETIF_F_RXCSUM; - sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), - on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); + if ((changed & NETIF_F_RXCSUM) && + !(sky2->hw->flags & SKY2_HW_NEW_LE)) { + sky2_write32(sky2->hw, + Q_ADDR(rxqaddr[sky2->port], Q_CSR), + (features & NETIF_F_RXCSUM) + ? 
BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); } if (changed & NETIF_F_RXHASH) diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 318c9ae7bf9..8cc863e2d2d 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2064,7 +2064,7 @@ enum { GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ -#define GMAC_DEF_MSK GM_IS_TX_FF_UR +#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) }; /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ @@ -2236,7 +2236,6 @@ struct sky2_port { u16 rx_pending; u16 rx_data_size; u16 rx_nfrags; - u16 rx_tag; struct { unsigned long last; diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index c6d47d10590..3d12e8ce939 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1083,10 +1083,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) /* Quickly dumps bad packets */ static void -smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) +smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords) { - unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; - if (likely(pktwords >= 4)) { unsigned int timeout = 500; unsigned int val; @@ -1150,7 +1148,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) continue; } - skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); + skb = netdev_alloc_skb(dev, pktwords << 2); if (unlikely(!skb)) { SMSC_WARN(pdata, rx_err, "Unable to allocate skb for rx packet"); @@ -1160,14 +1158,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) break; } - skb->data = skb->head; - skb_reset_tail_pointer(skb); + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->data, pktwords); /* Align IP on 16B boundary */ skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pktlength - 4); - pdata->ops->rx_readfifo(pdata, - (unsigned int *)skb->head, pktwords); skb->protocol = eth_type_trans(skb, dev); skb_checksum_none_assert(skb); netif_receive_skb(skb); @@ -1390,7 +1386,7 @@ static int smsc911x_open(struct net_device *dev) smsc911x_reg_write(pdata, FIFO_INT, temp); /* set RX Data offset to 2 bytes for alignment */ - smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); + smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8)); /* enable NAPI polling before enabling RX interrupts */ napi_enable(&pdata->napi); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index ab593009926..361beb797d1 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -2363,7 +2363,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); /* Switch off MAC, remember WOL setting */ - gp->asleep_wol = gp->wake_on_lan; + gp->asleep_wol = !!gp->wake_on_lan; gem_do_stop(dev, gp->asleep_wol); } else gp->asleep_wol = 0; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index bc8c183d622..eaa24fa8c19 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -740,8 +740,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) if (sblk->status & SD_STATUS_LINK_CHG) work_exists = 1; } - /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + + /* check for TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons) + work_exists = 1; + + /* check for RX work to do */ + if (tnapi->rx_rcb_prod_idx && *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_exists = 1; @@ -991,14 +996,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set) return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg); } -#define 
TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \ - MII_TG3_AUXCTL_ACTL_TX_6DB) +static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable) +{ + u32 val; + int err; -#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \ - tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \ - MII_TG3_AUXCTL_ACTL_TX_6DB); + err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val); + + if (err) + return err; + if (enable) + + val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + else + val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; + + err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, + val | MII_TG3_AUXCTL_ACTL_TX_6DB); + + return err; +} static int tg3_bmcr_reset(struct tg3 *tp) { @@ -1770,7 +1787,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp) otp = tp->phy_otp; - if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) + if (tg3_phy_toggle_auxctl_smdsp(tp, true)) return; phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT); @@ -1795,7 +1812,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp) ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT); tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) @@ -1843,9 +1860,9 @@ static void tg3_phy_eee_enable(struct tg3 *tp) (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } val = tr32(TG3_CPMU_EEE_MODE); @@ -1990,7 +2007,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) (MII_TG3_CTRL_AS_MASTER | MII_TG3_CTRL_ENABLE_AS_MASTER)); - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + err = tg3_phy_toggle_auxctl_smdsp(tp, true); if (err) return err; @@ -2011,7 +2028,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200); tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); tg3_writephy(tp, MII_TG3_CTRL, phy9_orig); @@ -2100,10 +2117,10 @@ static int tg3_phy_reset(struct tg3 *tp) out: if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) && - !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + !tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, 0x201f, 0x2aaa); tg3_phydsp_write(tp, 0x000a, 0x0323); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) { @@ -2112,14 +2129,14 @@ static int tg3_phy_reset(struct tg3 *tp) } if (tp->phy_flags & TG3_PHYFLG_BER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_phydsp_write(tp, 0x000a, 0x310b); tg3_phydsp_write(tp, 0x201f, 0x9506); tg3_phydsp_write(tp, 0x401f, 0x14e2); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + tg3_phy_toggle_auxctl_smdsp(tp, false); } } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) { - if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) { + if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) { tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) { tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b); @@ -2128,7 +2145,7 @@ static int tg3_phy_reset(struct tg3 *tp) } else tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); - TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + 
tg3_phy_toggle_auxctl_smdsp(tp, false); } } @@ -2976,7 +2993,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) tw32(TG3_CPMU_EEE_MODE, tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + err = tg3_phy_toggle_auxctl_smdsp(tp, true); if (!err) { u32 err2; @@ -3003,7 +3020,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) val |= MDIO_AN_EEE_ADV_1000T; err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); - err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + err2 = tg3_phy_toggle_auxctl_smdsp(tp, false); if (!err) err = err2; } @@ -5216,6 +5233,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) return work_done; } + if (!tnapi->rx_rcb_prod_idx) + return work_done; + /* run RX thread, within the bounds set by NAPI. * All RX "locking" is done by ensuring outside * code synchronizes with tg3->napi.poll() @@ -5654,6 +5674,9 @@ static void tg3_poll_controller(struct net_device *dev) int i; struct tg3 *tp = netdev_priv(dev); + if (tg3_irq_sync(tp)) + return; + for (i = 0; i < tp->irq_cnt; i++) tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); } @@ -6626,6 +6649,12 @@ static int tg3_alloc_consistent(struct tg3 *tp) */ switch (i) { default: + if (tg3_flag(tp, ENABLE_RSS)) { + tnapi->rx_rcb_prod_idx = NULL; + break; + } + /* Fall through */ + case 1: tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; break; case 2: @@ -13633,9 +13662,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3) || - (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) + tp->fw_needed) { + /* For firmware TSO, assume ASF is disabled. + * We'll disable TSO later if we discover ASF + * is enabled in tg3_get_eeprom_hw_cfg(). + */ tg3_flag_set(tp, TSO_CAPABLE); - else { + } else { tg3_flag_clear(tp, TSO_CAPABLE); tg3_flag_clear(tp, TSO_BUG); tp->fw_needed = NULL; @@ -13671,8 +13704,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_flag_set(tp, 4G_DMA_BNDRY_BUG); - if (tg3_flag(tp, 5755_PLUS)) - tg3_flag_set(tp, SHORT_DMA_BUG); + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, SHORT_DMA_BUG); else tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG); @@ -13873,6 +13907,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); + if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { + tg3_flag_clear(tp, TSO_CAPABLE); + tg3_flag_clear(tp, TSO_BUG); + tp->fw_needed = NULL; + } + if (tg3_flag(tp, ENABLE_APE)) { /* Allow reads and writes to the * APE register and memory space. @@ -14956,6 +14996,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tp->pm_cap = pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; + tp->irq_sync = 1; if (tg3_debug > 0) tp->msg_enable = tg3_debug; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 76b86506567..b013e7f7af3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -417,6 +417,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) * for indefinite time. 
*/ skb_orphan(skb); + nf_reset(skb); + /* Enqueue packet */ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); @@ -1245,10 +1247,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, } #endif - if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) + if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; - + } else { + memset(&ifr, 0, sizeof(ifr)); + } if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index c5c4b4def7f..e2a988c457d 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -371,7 +371,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, (size + 1) & 0xfffe); - if (skb->len == 0) + if (skb->len < sizeof(header)) break; head = (u8 *) skb->data; @@ -398,7 +398,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, u32 packet_len; u32 padbytes = 0xffff0000; - padlen = ((skb->len + 4) % 512) ? 0 : 4; + padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4; if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) { @@ -420,7 +420,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, cpu_to_le32s(&packet_len); skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); - if ((skb->len % 512) == 0) { + if (padlen) { cpu_to_le32s(&padbytes); memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); skb_put(skb, sizeof(padbytes)); @@ -1484,6 +1484,10 @@ static const struct usb_device_id products [] = { // Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter" USB_DEVICE (0x6189, 0x182d), .driver_info = (unsigned long) &ax8817x_info, +}, { + // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter" + USB_DEVICE (0x0df6, 0x0056), + .driver_info = (unsigned long) &ax88178_info, }, { // corega FEther USB2-TX USB_DEVICE (0x07aa, 0x0017), @@ -1532,6 +1536,10 @@ static const struct usb_device_id products [] = { // DLink DUB-E100 H/W Ver B1 Alternate USB_DEVICE (0x2001, 0x3c05), .driver_info = (unsigned long) &ax88772_info, +}, { + // DLink DUB-E100 H/W Ver C1 + USB_DEVICE (0x2001, 0x1a02), + .driver_info = (unsigned long) &ax88772_info, }, { // Linksys USB1000 USB_DEVICE (0x1737, 0x0039), @@ -1560,6 +1568,10 @@ static const struct usb_device_id products [] = { // ASIX 88772a USB_DEVICE(0x0db0, 0xa877), .driver_info = (unsigned long) &ax88772_info, +}, { + // Asus USB Ethernet Adapter + USB_DEVICE (0x0b95, 0x7e2b), + .driver_info = (unsigned long) &ax88772_info, }, { }, // END }; diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c index 882f53f708d..82d43b214f9 100644 --- a/drivers/net/usb/cdc_eem.c +++ b/drivers/net/usb/cdc_eem.c @@ -93,6 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf) /* no jumbogram (16K) support for now */ dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c924ea2bce0..544c309e0d9 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) struct cdc_state *info = (void *) &dev->data; int status; int rndis; + bool android_rndis_quirk = false; struct usb_driver *driver = driver_of(intf); struct usb_cdc_mdlm_desc *desc = NULL; struct 
usb_cdc_mdlm_detail_desc *detail = NULL; @@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) info->control, info->u->bSlaveInterface0, info->data); + /* fall back to hard-wiring for RNDIS */ + if (rndis) { + android_rndis_quirk = true; + goto next_desc; + } goto bad_desc; } if (info->control != intf) { @@ -271,11 +277,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) /* Microsoft ActiveSync based and some regular RNDIS devices lack the * CDC descriptors, so we'll hard-wire the interfaces and not check * for descriptors. + * + * Some Android RNDIS devices have a CDC Union descriptor pointing + * to non-existing interfaces. Ignore that and attempt the same + * hard-wired 0 and 1 interfaces. */ - if (rndis && !info->u) { + if (rndis && (!info->u || android_rndis_quirk)) { info->control = usb_ifnum_to_if(dev->udev, 0); info->data = usb_ifnum_to_if(dev->udev, 1); - if (!info->control || !info->data) { + if (!info->control || !info->data || info->control != intf) { dev_dbg(&intf->dev, "rndis: master #0/%p slave #1/%p\n", info->control, @@ -472,6 +482,7 @@ static const struct driver_info wwan_info = { /*-------------------------------------------------------------------------*/ #define HUAWEI_VENDOR_ID 0x12D1 +#define NOVATEL_VENDOR_ID 0x1410 static const struct usb_device_id products [] = { /* @@ -570,6 +581,13 @@ static const struct usb_device_id products [] = { .driver_info = (unsigned long)&wwan_info, }, +/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */ +{ + USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* * WHITELIST!!! * @@ -582,6 +600,21 @@ static const struct usb_device_id products [] = { * because of bugs/quirks in a given product (like Zaurus, above). */ { + /* Novatel USB551L */ + /* This match must come *before* the generic CDC-ETHER match so that + * we get FLAG_WWAN set on the device, since it's descriptors are + * generic CDC-ETHER. 
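A note on the asix_tx_fixup() hunk above: when the queued URB length (frame plus the 4-byte length header) lands exactly on the bulk endpoint's wMaxPacketSize boundary, the transfer would otherwise need a zero-length packet to terminate, so the driver appends a 4-byte pad marker; the fix derives that boundary from dev->maxpacket rather than hard-coding 512. Below is a minimal sketch of the test, assuming maxpacket is a power of two (64 for full-speed, 512 for high-speed bulk endpoints); the helper name is invented for illustration.

/* Return the number of pad bytes an asix-style tx_fixup would append:
 * 4 when (len + 4-byte header) is an exact multiple of maxpacket,
 * 0 otherwise. Assumes maxpacket is a power of two. */
static inline unsigned int example_usb_tx_pad(unsigned int len,
					      unsigned int maxpacket)
{
	return ((len + 4) & (maxpacket - 1)) ? 0 : 4;
}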
+ */ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR + | USB_DEVICE_ID_MATCH_PRODUCT + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = NOVATEL_VENDOR_ID, + .idProduct = 0xB001, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 8f9b7f76045..ab43674af75 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -59,7 +59,10 @@ #define USB_PRODUCT_IPHONE_3G 0x1292 #define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_4 0x1297 +#define USB_PRODUCT_IPAD 0x129a #define USB_PRODUCT_IPHONE_4_VZW 0x129c +#define USB_PRODUCT_IPHONE_4S 0x12a0 +#define USB_PRODUCT_IPHONE_5 0x12a8 #define IPHETH_USBINTF_CLASS 255 #define IPHETH_USBINTF_SUBCLASS 253 @@ -99,10 +102,22 @@ static struct usb_device_id ipheth_table[] = { USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPAD, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, { } }; MODULE_DEVICE_TABLE(usb, ipheth_table); diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index ad0298f9b5f..3362449a2d9 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev, int retv; int length = 0; /* shut up GCC */ - urb = usb_alloc_urb(0, GFP_NOIO); + urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index ed1b4321058..e7732508b8f 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) return -EIO; } - *datap = *attrdata; + *datap = le16_to_cpu(*attrdata); kfree(attrdata); return result; @@ -943,7 +943,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, } static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; -static const struct sierra_net_info_data sierra_net_info_data_68A3 = { +static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { .rx_urb_size = 8 * 1024, .whitelist = { .infolen = ARRAY_SIZE(sierra_net_ifnum_list), @@ -951,7 +951,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = { } }; -static const struct driver_info sierra_net_info_68A3 = { +static const struct driver_info sierra_net_info_direct_ip = { .description = "Sierra Wireless USB-to-WWAN Modem", .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = sierra_net_bind, @@ -959,12 +959,18 @@ static const struct driver_info sierra_net_info_68A3 = { .status = sierra_net_status, .rx_fixup = sierra_net_rx_fixup, .tx_fixup = sierra_net_tx_fixup, - .data = (unsigned long)&sierra_net_info_data_68A3, + .data = 
(unsigned long)&sierra_net_info_data_direct_ip, }; static const struct usb_device_id products[] = { {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ - .driver_info = (unsigned long) &sierra_net_info_68A3}, + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ + .driver_info = (unsigned long) &sierra_net_info_direct_ip}, {}, /* last item */ }; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 15b3d6888ae..2f4775f2313 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -719,8 +719,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); + int ret; + + if (new_mtu > MAX_SINGLE_PACKET_SIZE) + return -EINVAL; - int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); + ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); check_warn_return(ret, "Failed to set mac rx frame length"); return usbnet_change_mtu(netdev, new_mtu); @@ -964,7 +968,7 @@ static int smsc75xx_reset(struct usbnet *dev) netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf); - ret = smsc75xx_set_rx_max_frame_length(dev, 1514); + ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); check_warn_return(ret, "Failed to set max rx frame length"); ret = smsc75xx_read_reg(dev, MAC_RX, &buf); @@ -1049,6 +1053,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc75xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; } @@ -1107,8 +1112,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { - /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ - if (unlikely(size > (ETH_FRAME_LEN + 12))) { + /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ + if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x", rx_cmd_a); return 0; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index f74f3ce7152..e5c15bbbe62 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1190,7 +1190,7 @@ static const struct driver_info smsc95xx_info = { .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, - .flags = FLAG_ETHER | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ce395fe5de2..80637295278 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -277,17 +277,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu) } EXPORT_SYMBOL_GPL(usbnet_change_mtu); +/* The caller must hold list->lock */ +static void __usbnet_queue_skb(struct sk_buff_head *list, + struct sk_buff *newsk, enum skb_state state) +{ + struct skb_data *entry = (struct skb_data *) newsk->cb; + + __skb_queue_tail(list, newsk); + entry->state 
= state; +} + /*-------------------------------------------------------------------------*/ /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from * completion callbacks. 2.5 should have fixed those bugs... */ -static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list) +static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb, + struct sk_buff_head *list, enum skb_state state) { unsigned long flags; + enum skb_state old_state; + struct skb_data *entry = (struct skb_data *) skb->cb; spin_lock_irqsave(&list->lock, flags); + old_state = entry->state; + entry->state = state; __skb_unlink(skb, list); spin_unlock(&list->lock); spin_lock(&dev->done.lock); @@ -295,6 +310,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea if (dev->done.qlen == 1) tasklet_schedule(&dev->bh); spin_unlock_irqrestore(&dev->done.lock, flags); + return old_state; } /* some work can't be done in tasklets, so we use keventd @@ -335,7 +351,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; - entry->state = rx_start; entry->length = 0; usb_fill_bulk_urb (urb, dev->udev, dev->in, @@ -367,7 +382,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - __skb_queue_tail (&dev->rxq, skb); + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -418,16 +433,17 @@ static void rx_complete (struct urb *urb) struct skb_data *entry = (struct skb_data *) skb->cb; struct usbnet *dev = entry->dev; int urb_status = urb->status; + enum skb_state state; skb_put (skb, urb->actual_length); - entry->state = rx_done; + state = rx_done; entry->urb = NULL; switch (urb_status) { /* success */ case 0: if (skb->len < dev->net->hard_header_len) { - entry->state = rx_cleanup; + state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, @@ -466,7 +482,7 @@ static void rx_complete (struct urb *urb) "rx throttle %d\n", urb_status); } block: - entry->state = rx_cleanup; + state = rx_cleanup; entry->urb = urb; urb = NULL; break; @@ -477,17 +493,18 @@ static void rx_complete (struct urb *urb) // FALLTHROUGH default: - entry->state = rx_cleanup; + state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } - defer_bh(dev, skb, &dev->rxq); + state = defer_bh(dev, skb, &dev->rxq, state); if (urb) { if (netif_running (dev->net) && - !test_bit (EVENT_RX_HALT, &dev->flags)) { + !test_bit (EVENT_RX_HALT, &dev->flags) && + state != unlink_start) { rx_submit (dev, urb, GFP_ATOMIC); return; } @@ -573,18 +590,34 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq); static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) { unsigned long flags; - struct sk_buff *skb, *skbnext; + struct sk_buff *skb; int count = 0; spin_lock_irqsave (&q->lock, flags); - skb_queue_walk_safe(q, skb, skbnext) { + while (!skb_queue_empty(q)) { struct skb_data *entry; struct urb *urb; int retval; - entry = (struct skb_data *) skb->cb; + skb_queue_walk(q, skb) { + entry = (struct skb_data *) skb->cb; + if (entry->state != unlink_start) + goto found; + } + break; +found: + entry->state = unlink_start; urb = entry->urb; + /* + * Get reference count of the URB to avoid it to be + * freed during usb_unlink_urb, which may trigger + * use-after-free problem inside 
usb_unlink_urb since + * usb_unlink_urb is always racing with .complete + * handler(include defer_bh). + */ + usb_get_urb(urb); + spin_unlock_irqrestore(&q->lock, flags); // during some PM-driven resume scenarios, // these (async) unlinks complete immediately retval = usb_unlink_urb (urb); @@ -592,6 +625,8 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) netdev_dbg(dev->net, "unlink urb err, %d\n", retval); else count++; + usb_put_urb(urb); + spin_lock_irqsave(&q->lock, flags); } spin_unlock_irqrestore (&q->lock, flags); return count; @@ -1022,9 +1057,7 @@ static void tx_complete (struct urb *urb) } usb_autopm_put_interface_async(dev->intf); - urb->dev = NULL; - entry->state = tx_done; - defer_bh(dev, skb, &dev->txq); + (void) defer_bh(dev, skb, &dev->txq, tx_done); } /*-------------------------------------------------------------------------*/ @@ -1077,7 +1110,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, entry = (struct skb_data *) skb->cb; entry->urb = urb; entry->dev = dev; - entry->state = tx_start; entry->length = length; usb_fill_bulk_urb (urb, dev->udev, dev->out, @@ -1117,6 +1149,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, usb_anchor_urb(urb, &dev->deferred); /* no use to process more packets */ netif_stop_queue(net); + usb_put_urb(urb); spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_dbg(dev->net, "Delaying transmission for resumption\n"); goto deferred; @@ -1136,7 +1169,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, break; case 0: net->trans_start = jiffies; - __skb_queue_tail (&dev->txq, skb); + __usbnet_queue_skb(&dev->txq, skb, tx_start); if (dev->txq.qlen >= TX_QLEN (dev)) netif_stop_queue (net); } @@ -1258,6 +1291,8 @@ void usbnet_disconnect (struct usb_interface *intf) cancel_work_sync(&dev->kevent); + usb_scuttle_anchored_urbs(&dev->deferred); + if (dev->driver_info->unbind) dev->driver_info->unbind (dev, intf); diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 1a2234c2051..c1e6a446d13 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -332,6 +332,11 @@ static const struct usb_device_id products [] = { .driver_info = ZAURUS_PXA_INFO, }, { + /* Motorola Rokr E6 */ + USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &bogus_mdlm_info, +}, { /* Motorola MOTOMAGX phones */ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), @@ -349,6 +354,13 @@ static const struct usb_device_id products [] = { ZAURUS_MASTER_INTERFACE, .driver_info = OLYMPUS_MXL_INFO, }, + +/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */ +{ + USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM, + USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long) &bogus_mdlm_info, +}, { }, // END }; MODULE_DEVICE_TABLE(usb, products); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 4bf7c6d4ab9..6c0a3b0f0af 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -421,7 +421,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(peer, head); } -static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; +static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = { + [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, +}; static struct rtnl_link_ops veth_link_ops = { .kind = DRV_NAME, diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 
06daa9d6fee..c7e493461e0 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2513,9 +2513,6 @@ static int velocity_close(struct net_device *dev) if (dev->irq != 0) free_irq(dev->irq, dev); - /* Power down the chip */ - pci_set_power_state(vptr->pdev, PCI_D3hot); - velocity_free_rings(vptr); vptr->flags &= (~VELOCITY_FLAGS_OPENED); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 67402350d0d..0ef676dcb9c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -830,13 +830,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, ctx->l4_hdr_size = ((struct tcphdr *) skb_transport_header(skb))->doff * 4; else if (iph->protocol == IPPROTO_UDP) - /* - * Use tcp header size so that bytes to - * be copied are more than required by - * the device. - */ ctx->l4_hdr_size = - sizeof(struct tcphdr); + sizeof(struct udphdr); else ctx->l4_hdr_size = 0; } else { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index e08d75e3f17..862be050009 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01011200 +#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index f1e1643dc3e..78c51ab2e9b 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -8,6 +8,7 @@ * as published by the Free Software Foundation. */ +#include #include #include #include diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h index 6650fde99e1..9f1e947f355 100644 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ b/drivers/net/wimax/i2400m/i2400m-usb.h @@ -152,6 +152,9 @@ enum { /* Device IDs */ USB_DEVICE_ID_I6050 = 0x0186, USB_DEVICE_ID_I6050_2 = 0x0188, + USB_DEVICE_ID_I6150 = 0x07d6, + USB_DEVICE_ID_I6150_2 = 0x07d7, + USB_DEVICE_ID_I6150_3 = 0x07d9, USB_DEVICE_ID_I6250 = 0x0187, }; diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c index 2edd8fe1c1f..0a998638e1b 100644 --- a/drivers/net/wimax/i2400m/netdev.c +++ b/drivers/net/wimax/i2400m/netdev.c @@ -606,7 +606,8 @@ static void i2400m_get_drvinfo(struct net_device *net_dev, struct i2400m *i2400m = net_dev_to_i2400m(net_dev); strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1); - strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1); + strncpy(info->fw_version, + i2400m->fw_name ? 
: "", sizeof(info->fw_version) - 1); if (net_dev->dev.parent) strncpy(info->bus_info, dev_name(net_dev->dev.parent), sizeof(info->bus_info) - 1); diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index 298f2b0b631..0ddc8db2a45 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -491,6 +491,9 @@ int i2400mu_probe(struct usb_interface *iface, switch (id->idProduct) { case USB_DEVICE_ID_I6050: case USB_DEVICE_ID_I6050_2: + case USB_DEVICE_ID_I6150: + case USB_DEVICE_ID_I6150_2: + case USB_DEVICE_ID_I6150_3: case USB_DEVICE_ID_I6250: i2400mu->i6050 = 1; break; @@ -740,6 +743,9 @@ static struct usb_device_id i2400mu_id_table[] = { { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) }, + { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) }, { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, { USB_DEVICE(0x8086, 0x0181) }, { USB_DEVICE(0x8086, 0x1403) }, diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 441bb33f17a..0f23b1a789e 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -489,8 +489,6 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah) ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows); ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows); ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows); - ATH_ALLOC_BANK(ah->addac5416_21, - ah->iniAddac.ia_rows * ah->iniAddac.ia_columns); ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows); return 0; @@ -519,7 +517,6 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah) ATH_FREE_BANK(ah->analogBank6Data); ATH_FREE_BANK(ah->analogBank6TPCData); ATH_FREE_BANK(ah->analogBank7Data); - ATH_FREE_BANK(ah->addac5416_21); ATH_FREE_BANK(ah->bank6Temp); #undef ATH_FREE_BANK @@ -799,27 +796,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); ah->eep_ops->set_addac(ah, chan); - if (AR_SREV_5416_22_OR_LATER(ah)) { - REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites); - } else { - struct ar5416IniArray temp; - u32 addacSize = - sizeof(u32) * ah->iniAddac.ia_rows * - ah->iniAddac.ia_columns; - - /* For AR5416 2.0/2.1 */ - memcpy(ah->addac5416_21, - ah->iniAddac.ia_array, addacSize); - - /* override CLKDRV value at [row, column] = [31, 1] */ - (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0; - - temp.ia_array = ah->addac5416_21; - temp.ia_columns = ah->iniAddac.ia_columns; - temp.ia_rows = ah->iniAddac.ia_rows; - REG_WRITE_ARRAY(&temp, 1, regWrites); - } - + REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites); REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); ENABLE_REGWRITE_BUFFER(ah); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index c32f9d1b215..30bf703c044 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -179,6 +179,25 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah) INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac, ARRAY_SIZE(ar5416Addac), 2); } + + /* iniAddac needs to be modified for these chips */ + if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) { + struct ar5416IniArray *addac = &ah->iniAddac; + u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns; + u32 *data; + + data = kmalloc(size, GFP_KERNEL); + if (!data) + 
return; + + memcpy(data, addac->ia_array, size); + addac->ia_array = data; + + if (!AR_SREV_5416_22_OR_LATER(ah)) { + /* override CLKDRV value */ + INI_RA(addac, 31,1) = 0; + } + } } /* Support for Japan ch.14 (2484) spread */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 029773c333c..c84c493a9e1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -34,98 +34,98 @@ static const u32 ar9300_2p2_radio_postamble[][5] = { static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, - {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, - {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, - {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, - {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402}, - {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, - {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, - {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, - {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, - {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20}, - {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20}, - {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22}, - {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, - {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, - {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, - {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861}, - {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81}, - {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83}, - {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84}, - {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3}, - {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5}, - {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9}, - {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb}, - {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, - {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, - {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, - {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, - {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 
0x0f800202}, - {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400}, - {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402}, - {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603}, - {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02}, - {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04}, - {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20}, - {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20}, - {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22}, - {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, - {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, - {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, - {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861}, - {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81}, - {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83}, - {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84}, - {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3}, - {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5}, - {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9}, - {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb}, - {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, - {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, + {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, + {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, + {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, + {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, + {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, + {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, + {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, + {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, + {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, + {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, + {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, + {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, + {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, + {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, + {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, + {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, + {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, + {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, + {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, + {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, + {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83}, + {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, + {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, + {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, + {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, + {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, + {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a56c, 
0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, + {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, + {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, + {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, + {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, + {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, + {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, + {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, + {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, + {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, + {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, + {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, + {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, + {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, + {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, + {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, + {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, + {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, + {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, + {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, + {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, + {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, + {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, + {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, + {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, + {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, + {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, + {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000}, - {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501}, - {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501}, - {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03}, - {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04}, - {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04}, - {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, - {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 
0x03f0f800}, + {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000}, + {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000}, + {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501}, + {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501}, + {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03}, + {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04}, + {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04}, + {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, + {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, - {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, - {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, - {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, + {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, + {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584}, + {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800}, {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index ab21a491598..7f7bc947e00 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -69,13 +69,13 @@ #define AR9300_BASE_ADDR 0x3ff #define AR9300_BASE_ADDR_512 0x1ff -#define AR9300_OTP_BASE 0x14000 -#define AR9300_OTP_STATUS 0x15f18 +#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000) +#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18) #define AR9300_OTP_STATUS_TYPE 0x7 #define AR9300_OTP_STATUS_VALID 0x4 #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 #define AR9300_OTP_STATUS_SM_BUSY 0x1 -#define AR9300_OTP_READ_DATA 0x15f1c +#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c) enum targetPowerHTRates { HT_TARGET_RATE_0_8_16, diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index d4d8ceced89..b109c470858 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -159,6 +159,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); bf->bf_buf_addr = 0; + bf->bf_mpdu = NULL; } /* Get a new beacon from mac80211 */ diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index a1250c586e4..1f2f97f6038 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -19,7 +19,6 @@ /* Common calibration code */ -#define ATH9K_NF_TOO_HIGH -60 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) { @@ -335,10 +334,10 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf) "NF calibrated [%s] [chain %d] is %d\n", (i >= 3 ? 
"ext" : "ctl"), i % 3, nf[i]); - if (nf[i] > ATH9K_NF_TOO_HIGH) { + if (nf[i] > limit->max) { ath_dbg(common, ATH_DBG_CALIBRATE, "NF[%d] (%d) > MAX (%d), correcting to MAX\n", - i, nf[i], ATH9K_NF_TOO_HIGH); + i, nf[i], limit->max); nf[i] = limit->max; } else if (nf[i] < limit->min) { ath_dbg(common, ATH_DBG_CALIBRATE, diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 77ec288b5a7..247f7f8c666 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -35,7 +35,7 @@ #define WME_AC_BK 3 #define WME_NUM_AC 4 -#define ATH_RSSI_DUMMY_MARKER 0x127 +#define ATH_RSSI_DUMMY_MARKER 127 #define ATH_RSSI_LPF_LEN 10 #define RSSI_LPF_THRESHOLD -20 #define ATH_RSSI_EP_MULTIPLIER (1<<7) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 61e6d395071..409971284bc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -823,7 +823,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. */ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor != MINOR_VERSION_REQ) { + priv->fw_version_minor < MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 1b90ed8795c..4f7843ae680 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle, endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv, skb, htc_hdr->endpoint_id, txok); + } else { + kfree_skb(skb); } } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 7c2f06ed7a1..9130a5aa1c9 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -530,7 +530,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || - ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) && + ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) && !ah->is_pciexpress)) { ah->config.serialize_regmode = SER_REG_MODE_ON; @@ -682,13 +682,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah) u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) { + struct ath_common *common = ath9k_hw_common(ah); + int i = 0; + REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); udelay(100); REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK); - while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) + while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) { + udelay(100); + if (WARN_ON_ONCE(i >= 100)) { + ath_err(common, "PLL4 meaurement not done\n"); + break; + } + + i++; + } + return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; } EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 939cc9d76c2..9dc2666fd1c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -763,7 +763,6 @@ struct ath_hw { u32 *analogBank6Data; u32 *analogBank6TPCData; u32 *analogBank7Data; - u32 *addac5416_21; u32 *bank6Temp; u8 txpower_limit; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a126a3e238f..806748ab43a 100644 --- 
a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -648,6 +648,15 @@ void ath_hw_pll_work(struct work_struct *work) hw_pll_work.work); u32 pll_sqsum; + /* + * ensure that the PLL WAR is executed only + * after the STA is associated (or) if the + * beaconing had started in interfaces that + * uses beacons. + */ + if (!(sc->sc_flags & SC_OP_BEACONS)) + return; + if (AR_SREV_9485(sc->sc_ah)) { ath9k_ps_wakeup(sc); @@ -1782,6 +1791,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; + int key; ath_node_attach(sc, sta); @@ -1789,7 +1799,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - an->ps_key = ath_key_config(common, vif, sta, &ps_key); + key = ath_key_config(common, vif, sta, &ps_key); + if (key > 0) + an->ps_key = key; return 0; } @@ -1806,6 +1818,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); + an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index ea35843dd38..9d965e37b00 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, fc = hdr->frame_control; for (i = 0; i < sc->hw->max_rates; i++) { struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; - if (!rate->count) + if (rate->idx < 0 || !rate->count) break; final_ts_idx = i; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 3b5f9d6e3cb..932b3ab9407 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1697,7 +1697,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; - bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1719,6 +1718,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) tsf_lower = tsf & 0xffffffff; do { + bool decrypt_error = false; /* If handling rx interrupt and flush is in progress => exit */ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) break; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 33443bcaa8d..6f6f1002e5d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -272,6 +272,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) } bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); + bf->bf_next = NULL; list_del(&bf->list); spin_unlock_bh(&sc->tx.txbuflock); @@ -1488,6 +1489,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, if (tid) INCR(tid->seq_start, IEEE80211_SEQ_MAX); + bf->bf_next = NULL; bf->bf_lastbf = bf; fi = get_frame_info(bf->bf_mpdu); ath_buf_set_rate(sc, bf, fi->framelen); diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index e94084fcf6f..f190f3219fc 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1245,6 +1245,7 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb) atomic_dec(&ar->tx_ampdu_upload); tx_info->flags |= 
IEEE80211_TX_STAT_TX_FILTERED; + carl9170_release_dev_space(ar, skb); carl9170_tx_status(ar, skb, false); return true; } diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 47d44bcff37..5deeb14b823 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1390,8 +1390,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, struct b43_dmaring *ring; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; + static const struct b43_txstatus fake; /* filled with 0 */ + const struct b43_txstatus *txstat; int slot, firstused; bool frame_succeed; + int skip; + static u8 err_out1, err_out2; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1404,13 +1408,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; + + skip = 0; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in - * malfunction, memory leaks and/or stall of DMA functionality. */ - b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " - "Expected %d, but got %d\n", - ring->index, firstused, slot); - return; + * malfunction, memory leaks and/or stall of DMA functionality. + */ + if (slot == next_slot(ring, next_slot(ring, firstused))) { + /* If a single header/data pair was missed, skip over + * the first two slots in an attempt to recover. + */ + slot = firstused; + skip = 2; + if (!err_out1) { + /* Report the error once. */ + b43dbg(dev->wl, + "Skip on DMA ring %d slot %d.\n", + ring->index, slot); + err_out1 = 1; + } + } else { + /* More than a single header/data pair were missed. + * Report this error once. + */ + if (!err_out2) + b43dbg(dev->wl, + "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + err_out2 = 1; + return; + } } ops = ring->ops; @@ -1424,11 +1451,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, slot, firstused, ring->index); break; } + if (meta->skb) { struct b43_private_tx_info *priv_info = - b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); + b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); - unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { @@ -1440,8 +1469,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { - /* This is a scatter-gather fragment of a frame, so - * the skb pointer must not be NULL. */ + /* This is a scatter-gather fragment of a frame, + * so the skb pointer must not be NULL. + */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); @@ -1452,9 +1482,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* * Call back to inform the ieee80211 subsystem about - * the status of the transmission. + * the status of the transmission. 
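
The b43_dma_handle_txstatus() hunk above detects a TX status report that arrives exactly two slots ahead of the oldest in-flight slot and, instead of discarding it, resumes at the expected slot and walks two extra slots; as the rest of the hunk below notes, the skipped entries are then completed with a zeroed status. A standalone sketch of that slot arithmetic, using a toy ring size and hypothetical helper names rather than the driver's real structures:

#include <stdio.h>
#include <stdbool.h>

#define NR_SLOTS 16

static int next_slot(int slot)
{
    return (slot + 1) % NR_SLOTS;
}

/* Decide how to handle a status whose slot does not match the oldest
 * in-flight slot. If exactly one header/data pair was missed, resume
 * at the expected slot and mark two slots to be completed with a
 * zeroed (unacknowledged) status; otherwise give up on the report. */
static bool recover_slot(int reported, int expected, int *start, int *skip)
{
    if (reported == expected) {
        *start = reported;
        *skip = 0;
        return true;
    }
    if (reported == next_slot(next_slot(expected))) {
        *start = expected;   /* re-process the missed pair first */
        *skip = 2;           /* first two slots get the zeroed status */
        return true;
    }
    return false;            /* more than one pair lost: drop report */
}

int main(void)
{
    int start, skip;

    if (recover_slot(6, 4, &start, &skip))
        printf("resume at slot %d, %d slot(s) reported as unacked\n",
               start, skip);
    return 0;
}
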
When skipping over + * a missed TX status report, use a status structure + * filled with zeros to indicate that the frame was not + * sent (frame_count 0) and not acknowledged */ - frame_succeed = b43_fill_txstatus_report(dev, info, status); + if (unlikely(skip)) + txstat = &fake; + else + txstat = status; + + frame_succeed = b43_fill_txstatus_report(dev, info, + txstat); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; @@ -1482,12 +1521,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; - if (meta->is_last_fragment) { + if (meta->is_last_fragment && !skip) { /* This is the last scatter-gather * fragment of the frame. We are done. */ break; } slot = next_slot(ring, slot); + if (skip > 0) + --skip; } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 1ab8861dd43..4e8b48183fe 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -3843,6 +3843,8 @@ static void b43legacy_remove(struct ssb_device *dev) cancel_work_sync(&wldev->restart_work); B43legacy_WARN_ON(!wl); + if (!wldev->fw.ucode) + return; /* NULL if fw never loaded */ if (wl->current_dev == wldev) ieee80211_unregister_hw(wl->hw); diff --git a/drivers/net/wireless/bcmdhd/Makefile b/drivers/net/wireless/bcmdhd/Makefile index eda803e3ca7..40816c4ac57 100644 --- a/drivers/net/wireless/bcmdhd/Makefile +++ b/drivers/net/wireless/bcmdhd/Makefile @@ -8,7 +8,7 @@ DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \ -DNEW_COMPAT_WIRELESS -DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \ -DKEEP_ALIVE -DCSCAN -DGET_CUSTOM_MAC_ENABLE -DPKT_FILTER_SUPPORT \ -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \ - -DSET_RANDOM_MAC_SOFTAP -DWL_CFG80211_STA_EVENT \ + -DSET_RANDOM_MAC_SOFTAP -DWL_CFG80211_STA_EVENT -DSUPPORT_PM2_ONLY \ -Idrivers/net/wireless/bcmdhd -Idrivers/net/wireless/bcmdhd/include DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \ @@ -25,6 +25,8 @@ endif ifneq ($(CONFIG_CFG80211),) bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o DHDCFLAGS += -DWL_CFG80211 +DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65 +DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15 endif ifneq ($(CONFIG_DHD_USE_SCHED_SCAN),) DHDCFLAGS += -DWL_SCHED_SCAN diff --git a/drivers/net/wireless/bcmdhd/bcmsdh.c b/drivers/net/wireless/bcmdhd/bcmsdh.c index f67b13a03a1..89320b6f53d 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh.c +++ b/drivers/net/wireless/bcmdhd/bcmsdh.c @@ -22,7 +22,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
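
The bcmdhd Makefile hunk above injects -DCUSTOM_ROAM_TRIGGER_SETTING=-65 and -DCUSTOM_ROAM_DELTA_SETTING=15 into the build, while dhd.h (further down in this series) guards the same names with #ifndef fallbacks so unpatched builds keep the stock values. A minimal, compilable sketch of that override pattern; the fallback numbers mirror dhd.h's defaults and everything else is illustrative:

#include <stdio.h>

/* Used only when the build system does not pass -DCUSTOM_ROAM_TRIGGER_SETTING=... */
#ifndef CUSTOM_ROAM_TRIGGER_SETTING
#define CUSTOM_ROAM_TRIGGER_SETTING (-65)
#endif

#ifndef CUSTOM_ROAM_DELTA_SETTING
#define CUSTOM_ROAM_DELTA_SETTING 10
#endif

int main(void)
{
    printf("roam trigger %d dBm, roam delta %d dB\n",
           CUSTOM_ROAM_TRIGGER_SETTING, CUSTOM_ROAM_DELTA_SETTING);
    return 0;
}

Building with cc -DCUSTOM_ROAM_TRIGGER_SETTING=-70 picks up the override the same way the driver Makefile does.
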
* - * $Id: bcmsdh.c 300445 2011-12-03 05:37:20Z $ + * $Id: bcmsdh.c 344235 2012-07-11 23:47:18Z $ */ /** @@ -362,9 +362,10 @@ bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length) } bcopy(cis, tmp_buf, length); for (tmp_ptr = tmp_buf, ptr = cis; ptr < (cis + length - 4); tmp_ptr++) { - ptr += sprintf((char*)ptr, "%.2x ", *tmp_ptr & 0xff); + ptr += snprintf((char*)ptr, (cis + length - ptr - 4), + "%.2x ", *tmp_ptr & 0xff); if ((((tmp_ptr - tmp_buf) + 1) & 0xf) == 0) - ptr += sprintf((char *)ptr, "\n"); + ptr += snprintf((char *)ptr, (cis + length - ptr -4), "\n"); } MFREE(bcmsdh->osh, tmp_buf, length); } diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c index d257ddab84a..91232bdb275 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_linux.c +++ b/drivers/net/wireless/bcmdhd/bcmsdh_linux.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdh_linux.c 312788 2012-02-03 23:06:32Z $ + * $Id: bcmsdh_linux.c 352863 2012-08-24 04:48:50Z $ */ /** @@ -404,6 +404,10 @@ bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* match this pci device with what we support */ /* we can't solely rely on this to believe it is our SDIO Host Controller! */ if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) { + if (pdev->vendor == VENDOR_BROADCOM) { + SDLX_MSG(("%s: Unknown Broadcom device (vendor: %#x, device: %#x).\n", + __FUNCTION__, pdev->vendor, pdev->device)); + } return -ENODEV; } @@ -684,6 +688,11 @@ extern int sd_uhsimode; module_param(sd_uhsimode, int, 0); #endif +#ifdef BCMSDIOH_TXGLOM +extern uint sd_txglom; +module_param(sd_txglom, uint, 0); +#endif + #ifdef BCMSDH_MODULE EXPORT_SYMBOL(bcmsdh_attach); EXPORT_SYMBOL(bcmsdh_detach); diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c index 374154fb129..db051a41f7b 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: bcmsdh_sdmmc.c 314904 2012-02-14 21:36:04Z $ + * $Id: bcmsdh_sdmmc.c 351910 2012-08-21 22:39:46Z $ */ #include @@ -799,41 +799,49 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *by #if defined(MMC_SDIO_ABORT) /* to allow abort command through F1 */ else if (regaddr == SDIOD_CCCR_IOABORT) { - sdio_claim_host(gInstance->func[func]); - /* - * this sdio_f0_writeb() can be replaced with another api - * depending upon MMC driver change. - * As of this time, this is temporaray one - */ - sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); - sdio_release_host(gInstance->func[func]); + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + /* + * this sdio_f0_writeb() can be replaced with another api + * depending upon MMC driver change. 
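
The bcmsdh_cis_read() hunk above swaps sprintf() for snprintf() so the hex dump of the CIS can never write past the temporary buffer, whatever the tuple contents happen to be. A self-contained illustration of the same bounded formatting loop, with a made-up byte array instead of real CIS data:

#include <stdio.h>

int main(void)
{
    const unsigned char cis[] = { 0x01, 0x20, 0x04, 0xdb, 0xff, 0x32 };
    char out[32] = "";
    size_t used = 0;

    for (size_t i = 0; i < sizeof(cis); i++) {
        /* snprintf() bounds every write by the space that is left,
         * unlike sprintf(), which trusts the caller's arithmetic. */
        int n = snprintf(out + used, sizeof(out) - used, "%.2x ", cis[i]);
        if (n < 0 || (size_t)n >= sizeof(out) - used)
            break;          /* buffer full: stop instead of overflowing */
        used += (size_t)n;
    }
    printf("%s\n", out);
    return 0;
}
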
+ * As of this time, this is temporaray one + */ + sdio_writeb(gInstance->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } } #endif /* MMC_SDIO_ABORT */ else if (regaddr < 0xF0) { sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr)); } else { /* Claim host controller, perform F0 write, and release */ - sdio_claim_host(gInstance->func[func]); - sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret); - sdio_release_host(gInstance->func[func]); + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + sdio_f0_writeb(gInstance->func[func], + *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } } } else { /* Claim host controller, perform Fn write, and release */ - sdio_claim_host(gInstance->func[func]); - sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); - sdio_release_host(gInstance->func[func]); + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret); + sdio_release_host(gInstance->func[func]); + } } } else { /* CMD52 Read */ /* Claim host controller, perform Fn read, and release */ - sdio_claim_host(gInstance->func[func]); - - if (func == 0) { - *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret); - } else { - *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret); + if (gInstance->func[func]) { + sdio_claim_host(gInstance->func[func]); + if (func == 0) { + *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret); + } else { + *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret); + } + sdio_release_host(gInstance->func[func]); } - - sdio_release_host(gInstance->func[func]); } if (err_ret) { @@ -1012,7 +1020,13 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func, pkt_len -= xfred_len; xfred_len = 0; } - pkt_len = (pkt_len + 3) & 0xFFFFFFFC; + + /* Align Patch */ + if (!write || pkt_len < 32) + pkt_len = (pkt_len + 3) & 0xFFFFFFFC; + else if (pkt_len % DHD_SDALIGN) + pkt_len += DHD_SDALIGN - (pkt_len % DHD_SDALIGN); + #ifdef CONFIG_MMC_MSM7X00A if ((pkt_len % 64) == 32) { sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__)); diff --git a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c index a78faebe2e7..c93e41c6fb4 100644 --- a/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c +++ b/drivers/net/wireless/bcmdhd/bcmsdh_sdmmc_linux.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
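
The "Align Patch" above keeps reads and short writes on a 4-byte boundary but rounds longer writes up to the next DHD_SDALIGN multiple before handing the packet to the SDIO stack. The padding arithmetic in isolation, assuming an alignment of 32 bytes (DHD_SDALIGN is platform configuration, so the value here is only an example):

#include <stdio.h>

#define SDALIGN 32u  /* assumed stand-in for DHD_SDALIGN */

static unsigned int align_pkt_len(unsigned int len, int write)
{
    if (!write || len < 32)
        return (len + 3) & ~3u;                  /* 4-byte word alignment */
    if (len % SDALIGN)
        return len + SDALIGN - (len % SDALIGN);  /* round up to block size */
    return len;
}

int main(void)
{
    printf("%u %u %u\n",
           align_pkt_len(10, 0),   /* 12 */
           align_pkt_len(45, 1),   /* 64 */
           align_pkt_len(64, 1));  /* 64 */
    return 0;
}
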
* - * $Id: bcmsdh_sdmmc_linux.c 312783 2012-02-03 22:53:56Z $ + * $Id: bcmsdh_sdmmc_linux.c 331154 2012-05-04 00:41:40Z $ */ #include @@ -188,7 +188,6 @@ static int bcmsdh_sdmmc_suspend(struct device *pdev) if (dhd_os_check_wakelock(bcmsdh_get_drvdata())) return -EBUSY; - sdio_flags = sdio_get_host_pm_caps(func); if (!(sdio_flags & MMC_PM_KEEP_POWER)) { @@ -202,7 +201,6 @@ static int bcmsdh_sdmmc_suspend(struct device *pdev) sd_err(("%s: error while trying to keep power\n", __FUNCTION__)); return ret; } - #if defined(OOB_INTR_ONLY) bcmsdh_oob_intr_set(0); #endif /* defined(OOB_INTR_ONLY) */ diff --git a/drivers/net/wireless/bcmdhd/dhd.h b/drivers/net/wireless/bcmdhd/dhd.h index 725a909273d..c5a74cde1fe 100644 --- a/drivers/net/wireless/bcmdhd/dhd.h +++ b/drivers/net/wireless/bcmdhd/dhd.h @@ -24,7 +24,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd.h 333052 2012-05-12 02:09:28Z $ + * $Id: dhd.h 357954 2012-09-20 18:22:31Z $ */ /**************** @@ -301,6 +301,8 @@ extern int dhd_os_wake_unlock(dhd_pub_t *pub); extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub); extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val); extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val); +extern int dhd_os_wd_wake_lock(dhd_pub_t *pub); +extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub); inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp) { @@ -323,8 +325,10 @@ inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp) #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ } -#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) -#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub) +#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub) +#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub) +#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub) #define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub) #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_rx_timeout_enable(pub, val) #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) dhd_os_wake_lock_ctrl_timeout_enable(pub, val) @@ -607,6 +611,25 @@ extern uint dhd_pktgen_len; #define MAX_PKTGEN_LEN 1800 #endif +/* hooks for custom glom setting option via Makefile */ +#define DEFAULT_GLOM_VALUE -1 +#ifndef CUSTOM_GLOM_SETTING +#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE +#endif + +/* hooks for custom Roaming Trigger setting via Makefile */ +#define DEFAULT_ROAM_TRIGGER_VALUE -65 /* dBm default roam trigger all band */ +#define DEFAULT_ROAM_TRIGGER_SETTING -1 +#ifndef CUSTOM_ROAM_TRIGGER_SETTING +#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE +#endif + +/* hooks for custom Roaming Romaing setting via Makefile */ +#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */ +#define DEFAULT_ROAM_DELTA_SETTING -1 +#ifndef CUSTOM_ROAM_DELTA_SETTING +#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE +#endif /* optionally set by a module_param_string() */ #define MOD_PARAM_PATHLEN 2048 diff --git a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c index 970216efc7a..351c372ffa8 100644 --- a/drivers/net/wireless/bcmdhd/dhd_cfg80211.c +++ b/drivers/net/wireless/bcmdhd/dhd_cfg80211.c @@ -69,6 +69,9 @@ s32 dhd_cfg80211_down(struct wl_priv *wl) return 0; } +/* + * dhd_cfg80211_set_p2p_info : gets called when GO or GC 
created + */ s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val) { dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); @@ -83,7 +86,7 @@ s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val) dhd_arp_offload_set(dhd, 0); dhd_arp_offload_enable(dhd, false); #endif /* ARP_OFFLOAD_SUPPORT */ - + /* diable all filtering in p2p mode */ dhd_os_set_packet_filter(dhd, 0); /* Setup timeout if Beacons are lost and roam is off to report link down */ @@ -94,6 +97,9 @@ s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val) return 0; } +/* + * dhd_cfg80211_clean_p2p_info : gets called when GO or GC terminated + */ s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl) { dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); @@ -108,7 +114,6 @@ s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl) dhd_arp_offload_set(dhd, dhd_arp_mode); dhd_arp_offload_enable(dhd, true); #endif /* ARP_OFFLOAD_SUPPORT */ - dhd_os_set_packet_filter(dhd, 1); /* Setup timeout if Beacons are lost and roam is off to report link down */ diff --git a/drivers/net/wireless/bcmdhd/dhd_common.c b/drivers/net/wireless/bcmdhd/dhd_common.c index d5af27f40b7..d46864c3a2a 100644 --- a/drivers/net/wireless/bcmdhd/dhd_common.c +++ b/drivers/net/wireless/bcmdhd/dhd_common.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: dhd_common.c 331276 2012-05-04 08:05:57Z $ + * $Id: dhd_common.c 380760 2013-01-23 21:59:27Z $ */ #include #include @@ -1767,14 +1767,11 @@ bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval) int dhd_get_dtim_skip(dhd_pub_t *dhd) { - int bcn_li_dtim; + int bcn_li_dtim = 1; + char buf[128]; int ret = -1; int dtim_assoc = 0; - - if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1)) - bcn_li_dtim = 3; - else - bcn_li_dtim = dhd->dtim_skip; + int ap_beacon = 0; /* Check if associated */ if (dhd_is_associated(dhd, NULL, NULL) == FALSE) { @@ -1782,15 +1779,34 @@ dhd_get_dtim_skip(dhd_pub_t *dhd) goto exit; } - /* if assoc grab ap's dtim value */ - if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD, - &dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) { + /* read AP beacon if do nother if APs Beacon more that 100msec */ + bcm_mkiovar("bi_assoc", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { + DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); + goto exit; + } + + ap_beacon = dtoh32(*(int *)buf); + + /* if APs Beacon more that 100msec do no dtim skip */ + if (ap_beacon > 100) { + DHD_ERROR(("%s no dtim skip for AP with %d beacon\n", __FUNCTION__, ap_beacon)); + goto exit; + } + + + /* Read DTIM value if associated */ + memset(buf, 0, sizeof(buf)); + bcm_mkiovar("dtim_assoc", 0, 0, buf, sizeof(buf)); + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret)); goto exit; } - DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n", - __FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL)); + dtim_assoc = dtoh32(*(int *)buf); + + DHD_ERROR(("%s beacom=%d msec bcn_li_dtim=%d DTIM=%d Listen=%d\n", + __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL)); /* if not assocated just eixt */ if (dtim_assoc == 0) { @@ -1800,12 +1816,16 @@ dhd_get_dtim_skip(dhd_pub_t *dhd) /* check if sta listen interval fits into AP dtim */ if (dtim_assoc > LISTEN_INTERVAL) { /* AP DTIM to big for our Listen Interval : no dtim skiping */ - bcn_li_dtim = 1; DHD_ERROR(("%s DTIM=%d > 
Listen=%d : too big ...\n", __FUNCTION__, dtim_assoc, LISTEN_INTERVAL)); goto exit; } + if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1)) + bcn_li_dtim = 3; + else + bcn_li_dtim = dhd->dtim_skip; + if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) { /* Round up dtim_skip to fit into STAs Listen Interval */ bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc); diff --git a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c index 9750eeb23bc..de519a57bf8 100644 --- a/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c +++ b/drivers/net/wireless/bcmdhd/dhd_custom_gpio.c @@ -20,7 +20,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * -* $Id: dhd_custom_gpio.c,v 1.2.42.1 2010-10-19 00:41:09 Exp $ +* $Id: dhd_custom_gpio.c 339054 2012-06-15 04:56:55Z $ */ #include @@ -97,13 +97,13 @@ int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr) #endif /* CUSTOMER_HW2 */ if (dhd_oob_gpio_num < 0) { - WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n", + WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined\n", __FUNCTION__)); return (dhd_oob_gpio_num); } WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n", - __FUNCTION__, dhd_oob_gpio_num)); + __FUNCTION__, dhd_oob_gpio_num)); #if defined CUSTOMER_HW host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num); diff --git a/drivers/net/wireless/bcmdhd/dhd_linux.c b/drivers/net/wireless/bcmdhd/dhd_linux.c index d32ce1135f2..476a50a9964 100644 --- a/drivers/net/wireless/bcmdhd/dhd_linux.c +++ b/drivers/net/wireless/bcmdhd/dhd_linux.c @@ -22,7 +22,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
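
The reworked dhd_get_dtim_skip() above refuses to skip DTIMs when the AP beacons slower than every 100 ms, starts from a default skip of 3 otherwise, and clamps the result to LISTEN_INTERVAL / DTIM-period whenever the product would exceed the STA listen interval. A condensed sketch of just that arithmetic, with an assumed listen interval of 10 beacons and none of the iovar plumbing:

#include <stdio.h>

#define LISTEN_INTERVAL 10   /* assumed STA listen interval, in beacons */

static int dtim_skip(int ap_beacon_ms, int dtim_assoc, int configured_skip)
{
    int bcn_li_dtim;

    /* slow beacons, unknown DTIM, or DTIM longer than the listen
     * interval: never skip */
    if (ap_beacon_ms > 100 || dtim_assoc == 0 || dtim_assoc > LISTEN_INTERVAL)
        return 1;

    /* default to skipping 3 DTIMs unless a larger value was configured */
    bcn_li_dtim = (configured_skip <= 1) ? 3 : configured_skip;

    /* clamp so skip * DTIM period still fits in the listen interval */
    if (bcn_li_dtim * dtim_assoc > LISTEN_INTERVAL)
        bcn_li_dtim = LISTEN_INTERVAL / dtim_assoc;

    return bcn_li_dtim;
}

int main(void)
{
    printf("%d\n", dtim_skip(100, 2, 0));   /* 3: default skip fits   */
    printf("%d\n", dtim_skip(100, 4, 0));   /* 2: clamped to 10 / 4   */
    printf("%d\n", dtim_skip(300, 2, 0));   /* 1: beacon period > 100 */
    return 0;
}
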
* - * $Id: dhd_linux.c 333885 2012-05-18 00:39:03Z $ + * $Id: dhd_linux.c 352789 2012-08-24 00:01:33Z $ */ #include @@ -259,6 +259,7 @@ typedef struct dhd_info { struct wake_lock wl_wifi; /* Wifi wakelock */ struct wake_lock wl_rxwake; /* Wifi rx wakelock */ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ + struct wake_lock wl_wdwake; /* Wifi wd wakelock */ #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) @@ -270,6 +271,7 @@ typedef struct dhd_info { #endif spinlock_t wakelock_spinlock; int wakelock_counter; + int wakelock_wd_counter; int wakelock_rx_timeout_enable; int wakelock_ctrl_timeout_enable; @@ -327,8 +329,8 @@ uint dhd_console_ms = 0; module_param(dhd_console_ms, uint, 0644); #endif /* defined(DHD_DEBUG) */ -/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ -uint dhd_arp_mode = 0xb; +/* ARP offload agent mode : enable ARP Peer Auto-Reply */ +uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY; module_param(dhd_arp_mode, uint, 0); /* ARP offload enable */ @@ -528,7 +530,9 @@ static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) static int dhd_set_suspend(int value, dhd_pub_t *dhd) { +#if !defined(SUPPORT_PM2_ONLY) int power_mode = PM_MAX; +#endif /* wl_pkt_filter_enable_t enable_parm; */ char iovbuf[32]; int bcn_li_dtim = 3; @@ -542,10 +546,12 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) if (value && dhd->in_suspend) { /* Kernel suspended */ - DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); + DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__)); +#if !defined(SUPPORT_PM2_ONLY) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); +#endif /* Enable packet filter, only allow unicast packet to send up */ dhd_set_packet_filter(1, dhd); @@ -566,11 +572,13 @@ static int dhd_set_suspend(int value, dhd_pub_t *dhd) } else { /* Kernel resumed */ - DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__)); + DHD_ERROR(("%s: Remove extra suspend setting\n", __FUNCTION__)); +#if !defined(SUPPORT_PM2_ONLY) power_mode = PM_FAST; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); +#endif /* disable pkt filter */ dhd_set_packet_filter(0, dhd); @@ -1026,7 +1034,7 @@ dhd_op_if(dhd_if_t *ifp) #endif netif_stop_queue(ifp->net); unregister_netdev(ifp->net); - ret = DHD_DEL_IF; + ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */ #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { @@ -1361,10 +1369,11 @@ dhd_start_xmit(struct sk_buff *skb, struct net_device *net) done: - if (ret) + if (ret) { dhd->pub.dstats.tx_dropped++; - else + } else { dhd->pub.tx_packets++; + } DHD_OS_WAKE_UNLOCK(&dhd->pub); @@ -1524,14 +1533,14 @@ dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) wl_event_to_host_order(&event); if (!tout_ctrl) tout_ctrl = DHD_PACKET_TIMEOUT_MS; - if (event.event_type == WLC_E_BTA_HCI_EVENT) { - dhd_bta_doevt(dhdp, data, event.datalen); - } #ifdef PNO_SUPPORT if (event.event_type == WLC_E_PFN_NET_FOUND) { - tout_ctrl *= 2; + tout_ctrl = 7 * DHD_PACKET_TIMEOUT_MS; } #endif /* PNO_SUPPORT */ + if (event.event_type == WLC_E_BTA_HCI_EVENT) { + dhd_bta_doevt(dhdp, data, event.datalen); + } } else { tout_rx = DHD_PACKET_TIMEOUT_MS; } @@ -1696,7 +1705,6 @@ dhd_watchdog_thread(void *data) dhd_os_spin_unlock(&dhd->pub, flags); } dhd_os_sdunlock(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); } else { break; } @@ -1710,9 +1718,7 @@ static void dhd_watchdog(ulong data) dhd_info_t *dhd = 
(dhd_info_t *)data; unsigned long flags; - DHD_OS_WAKE_LOCK(&dhd->pub); if (dhd->pub.dongle_reset) { - DHD_OS_WAKE_UNLOCK(&dhd->pub); return; } @@ -1736,7 +1742,6 @@ static void dhd_watchdog(ulong data) mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); dhd_os_spin_unlock(&dhd->pub, flags); dhd_os_sdunlock(&dhd->pub); - DHD_OS_WAKE_UNLOCK(&dhd->pub); } #ifdef DHDTHREAD @@ -2648,7 +2653,9 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) } sema_init(&dhd->proto_sem, 1); +#ifdef DHDTHREAD sema_init(&dhd->sdsem, 1); +#endif #ifdef PROP_TXSTATUS spin_lock_init(&dhd->wlfc_spinlock); @@ -2678,12 +2685,14 @@ dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) /* Initialize Wakelock stuff */ spin_lock_init(&dhd->wakelock_spinlock); dhd->wakelock_counter = 0; + dhd->wakelock_wd_counter = 0; dhd->wakelock_rx_timeout_enable = 0; dhd->wakelock_ctrl_timeout_enable = 0; #ifdef CONFIG_HAS_WAKELOCK wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); + wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake"); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->dhd_net_if_mutex); @@ -2868,12 +2877,12 @@ dhd_bus_start(dhd_pub_t *dhdp) dhd->wd_timer_valid = FALSE; dhd_os_spin_unlock(&dhd->pub, flags); del_timer_sync(&dhd->timer); - DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); return -ENODEV; } @@ -2892,6 +2901,7 @@ dhd_bus_start(dhd_pub_t *dhdp) if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ + DHD_OS_WD_WAKE_UNLOCK(&dhd->pub); return -ENODEV; } @@ -2987,6 +2997,8 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) char *ptr; uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ uint16 chipID; + int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL}; + int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL}; #if defined(SOFTAP) uint dtim = 1; #endif @@ -3154,6 +3166,14 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); + /* custom romaing setting */ + if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger, + sizeof(roam_trigger), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret)); + if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta, + sizeof(roam_delta), TRUE, 0)) < 0) + DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret)); + /* Set PowerSave mode */ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); @@ -3212,6 +3232,7 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_SET_SSID); setbit(eventmask, WLC_E_PRUNE); setbit(eventmask, WLC_E_AUTH); + setbit(eventmask, WLC_E_ASSOC); setbit(eventmask, WLC_E_REASSOC); setbit(eventmask, WLC_E_REASSOC_IND); setbit(eventmask, WLC_E_DEAUTH); @@ -3226,8 +3247,10 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_MIC_ERROR); setbit(eventmask, WLC_E_ASSOC_REQ_IE); setbit(eventmask, WLC_E_ASSOC_RESP_IE); +#ifndef WL_CFG80211 setbit(eventmask, WLC_E_PMKID_CACHE); setbit(eventmask, WLC_E_TXFAIL); +#endif setbit(eventmask, WLC_E_JOIN_START); setbit(eventmask, WLC_E_SCAN_COMPLETE); #ifdef WLMEDIA_HTSF @@ -3244,7 
+3267,6 @@ dhd_preinit_ioctls(dhd_pub_t *dhd) setbit(eventmask, WLC_E_ACTION_FRAME_RX); setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE); setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE); - setbit(eventmask, WLC_E_P2P_PROBREQ_MSG); setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); } #endif /* WL_CFG80211 */ @@ -3771,10 +3793,15 @@ void dhd_detach(dhd_pub_t *dhdp) if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { #ifdef CONFIG_HAS_WAKELOCK + dhd->wakelock_counter = 0; + dhd->wakelock_wd_counter = 0; + dhd->wakelock_rx_timeout_enable = 0; + dhd->wakelock_ctrl_timeout_enable = 0; wake_lock_destroy(&dhd->wl_wifi); wake_lock_destroy(&dhd->wl_rxwake); wake_lock_destroy(&dhd->wl_ctrlwake); -#endif + wake_lock_destroy(&dhd->wl_wdwake); +#endif /* CONFIG_HAS_WAKELOCK */ } } @@ -3967,11 +3994,16 @@ dhd_os_wd_timer(void *bus, uint wdtick) DHD_TRACE(("%s: Enter\n", __FUNCTION__)); + if (!dhd) + return; + flags = dhd_os_spin_lock(pub); /* don't start the wd until fw is loaded */ if (pub->busstate == DHD_BUS_DOWN) { dhd_os_spin_unlock(pub, flags); + if (!wdtick) + DHD_OS_WD_WAKE_UNLOCK(pub); return; } @@ -3984,10 +4016,12 @@ dhd_os_wd_timer(void *bus, uint wdtick) #else del_timer(&dhd->timer); #endif /* DHDTHREAD */ + DHD_OS_WD_WAKE_UNLOCK(pub); return; } if (wdtick) { + DHD_OS_WD_WAKE_LOCK(pub); dhd_watchdog_ms = (uint)wdtick; /* Re arm the timer, at last watchdog period */ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms)); @@ -4326,6 +4360,13 @@ dhd_dev_reset(struct net_device *dev, uint8 flag) dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); + if (flag == TRUE) { + /* Issue wl down command before resetting the chip */ + if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) { + DHD_TRACE(("%s: wl down failed\n", __FUNCTION__)); + } + } + ret = dhd_bus_devreset(&dhd->pub, flag); if (ret) { DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); @@ -4357,6 +4398,9 @@ int net_os_set_suspend(struct net_device *dev, int val, int force) ret = dhd_set_suspend(val, &dhd->pub); #else ret = dhd_suspend_resume_helper(dhd, val, force); +#endif +#ifdef WL_CFG80211 + wl_cfg80211_update_power_mode(dev); #endif } return ret; @@ -4506,7 +4550,7 @@ static void dhd_hang_process(struct work_struct *work) wl_iw_send_priv_event(dev, "HANG"); #endif #if defined(WL_CFG80211) - wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); + wl_cfg80211_hang(dev, WLAN_REASON_DRIVER_ERROR); #endif } } @@ -4835,7 +4879,8 @@ int dhd_os_check_wakelock(void *dhdp) return 0; dhd = (dhd_info_t *)(pub->info); - if (dhd && wake_lock_active(&dhd->wl_wifi)) + if (dhd && (wake_lock_active(&dhd->wl_wifi) || + wake_lock_active(&dhd->wl_wdwake))) return 1; #endif return 0; @@ -4851,6 +4896,44 @@ int net_os_wake_unlock(struct net_device *dev) return ret; } +int dhd_os_wd_wake_lock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); +#ifdef CONFIG_HAS_WAKELOCK + if (!dhd->wakelock_wd_counter) + wake_lock(&dhd->wl_wdwake); +#endif + dhd->wakelock_wd_counter++; + ret = dhd->wakelock_wd_counter; + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + +int dhd_os_wd_wake_unlock(dhd_pub_t *pub) +{ + dhd_info_t *dhd = (dhd_info_t *)(pub->info); + unsigned long flags; + int ret = 0; + + if (dhd) { + spin_lock_irqsave(&dhd->wakelock_spinlock, flags); + if (dhd->wakelock_wd_counter) { + dhd->wakelock_wd_counter = 0; +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&dhd->wl_wdwake); 
+#endif + } + spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); + } + return ret; +} + int dhd_os_check_if_up(void *dhdp) { dhd_pub_t *pub = (dhd_pub_t *)dhdp; diff --git a/drivers/net/wireless/bcmdhd/dhd_linux_mon.c b/drivers/net/wireless/bcmdhd/dhd_linux_mon.c deleted file mode 100644 index dd9c71f75be..00000000000 --- a/drivers/net/wireless/bcmdhd/dhd_linux_mon.c +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Broadcom Dongle Host Driver (DHD), Linux monitor network interface - * - * Copyright (C) 1999-2011, Broadcom Corporation - * - * Unless you and Broadcom execute a separate written software license - * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2 (the "GPL"), - * available at http://www.broadcom.com/licenses/GPLv2.php, with the - * following added to such license: - * - * As a special exception, the copyright holders of this software give you - * permission to link this software with independent modules, and to copy and - * distribute the resulting executable under terms of your choice, provided that - * you also meet, for each linked independent module, the terms and conditions of - * the license of that module. An independent module is a module which is not - * derived from this software. The special exception does not apply to any - * modifications of the software. - * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a license - * other than the GPL, without Broadcom's express prior written consent. - * - * $Id: dhd_linux_mon.c,v 1.131.2.55 2011-02-09 05:31:56 Exp $ - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -typedef enum monitor_states -{ - MONITOR_STATE_DEINIT = 0x0, - MONITOR_STATE_INIT = 0x1, - MONITOR_STATE_INTERFACE_ADDED = 0x2, - MONITOR_STATE_INTERFACE_DELETED = 0x4 -} monitor_states_t; -extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); - -/** - * Local declarations and defintions (not exposed) - */ -#define MON_PRINT(format, ...) 
printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__) -#define MON_TRACE MON_PRINT - -typedef struct monitor_interface { - int radiotap_enabled; - struct net_device* real_ndev; /* The real interface that the monitor is on */ - struct net_device* mon_ndev; -} monitor_interface; - -typedef struct dhd_linux_monitor { - void *dhd_pub; - monitor_states_t monitor_state; - monitor_interface mon_if[DHD_MAX_IFS]; - struct mutex lock; /* lock to protect mon_if */ -} dhd_linux_monitor_t; - -static dhd_linux_monitor_t g_monitor; - -static struct net_device* lookup_real_netdev(char *name); -static monitor_interface* ndev_to_monif(struct net_device *ndev); -static int dhd_mon_if_open(struct net_device *ndev); -static int dhd_mon_if_stop(struct net_device *ndev); -static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); -static void dhd_mon_if_set_multicast_list(struct net_device *ndev); -static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); - -static const struct net_device_ops dhd_mon_if_ops = { - .ndo_open = dhd_mon_if_open, - .ndo_stop = dhd_mon_if_stop, - .ndo_start_xmit = dhd_mon_if_subif_start_xmit, - .ndo_set_multicast_list = dhd_mon_if_set_multicast_list, - .ndo_set_mac_address = dhd_mon_if_change_mac, -}; - -/** - * Local static function defintions - */ - -/* Look up dhd's net device table to find a match (e.g. interface "eth0" is a match for "mon.eth0" - * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") - */ -static struct net_device* lookup_real_netdev(char *name) -{ - int i; - int last_name_len = 0; - struct net_device *ndev; - struct net_device *ndev_found = NULL; - - /* We want to find interface "p2p-eth0-0" for monitor interface "mon.p2p-eth0-0", so - * we skip "eth0" even if "mon.p2p-eth0-0" contains "eth0" - */ - for (i = 0; i < DHD_MAX_IFS; i++) { - ndev = dhd_idx2net(g_monitor.dhd_pub, i); - if (ndev && strstr(name, ndev->name)) { - if (strlen(ndev->name) > last_name_len) { - ndev_found = ndev; - last_name_len = strlen(ndev->name); - } - } - } - - return ndev_found; -} - -static monitor_interface* ndev_to_monif(struct net_device *ndev) -{ - int i; - - for (i = 0; i < DHD_MAX_IFS; i++) { - if (g_monitor.mon_if[i].mon_ndev == ndev) - return &g_monitor.mon_if[i]; - } - - return NULL; -} - -static int dhd_mon_if_open(struct net_device *ndev) -{ - int ret = 0; - - MON_PRINT("enter\n"); - return ret; -} - -static int dhd_mon_if_stop(struct net_device *ndev) -{ - int ret = 0; - - MON_PRINT("enter\n"); - return ret; -} - -static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - int ret = 0; - int rtap_len; - int qos_len = 0; - int dot11_hdr_len = 24; - int snap_len = 6; - unsigned char *pdata; - unsigned short frame_ctl; - unsigned char src_mac_addr[6]; - unsigned char dst_mac_addr[6]; - struct ieee80211_hdr *dot11_hdr; - struct ieee80211_radiotap_header *rtap_hdr; - monitor_interface* mon_if; - - MON_PRINT("enter\n"); - - mon_if = ndev_to_monif(ndev); - if (mon_if == NULL || mon_if->real_ndev == NULL) { - MON_PRINT(" cannot find matched net dev, skip the packet\n"); - goto fail; - } - - if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) - goto fail; - - rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; - if (unlikely(rtap_hdr->it_version)) - goto fail; - - rtap_len = ieee80211_get_radiotap_len(skb->data); - if (unlikely(skb->len < rtap_len)) - goto fail; - - MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); - - /* Skip the ratio tap header */ - skb_pull(skb, rtap_len); - - dot11_hdr 
= (struct ieee80211_hdr *)skb->data; - frame_ctl = le16_to_cpu(dot11_hdr->frame_control); - /* Check if the QoS bit is set */ - if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { - /* Check if this ia a Wireless Distribution System (WDS) frame - * which has 4 MAC addresses - */ - if (dot11_hdr->frame_control & 0x0080) - qos_len = 2; - if ((dot11_hdr->frame_control & 0x0300) == 0x0300) - dot11_hdr_len += 6; - - memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); - memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); - - /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for - * for two MAC addresses - */ - skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); - pdata = (unsigned char*)skb->data; - memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); - memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr)); - - MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); - - /* Use the real net device to transmit the packet */ - ret = dhd_start_xmit(skb, mon_if->real_ndev); - - return ret; - } -fail: - dev_kfree_skb(skb); - return 0; -} - -static void dhd_mon_if_set_multicast_list(struct net_device *ndev) -{ - monitor_interface* mon_if; - - mon_if = ndev_to_monif(ndev); - if (mon_if == NULL || mon_if->real_ndev == NULL) { - MON_PRINT(" cannot find matched net dev, skip the packet\n"); - } - - MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); -} - -static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) -{ - int ret = 0; - monitor_interface* mon_if; - - mon_if = ndev_to_monif(ndev); - if (mon_if == NULL || mon_if->real_ndev == NULL) { - MON_PRINT(" cannot find matched net dev, skip the packet\n"); - } - - MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); - return ret; -} - -/** - * Global function definitions (declared in dhd_linux_mon.h) - */ - -int dhd_add_monitor(char *name, struct net_device **new_ndev) -{ - int i; - int idx = -1; - int ret = 0; - struct net_device* ndev = NULL; - dhd_linux_monitor_t **dhd_mon; - - mutex_lock(&g_monitor.lock); - - MON_TRACE("enter, if name: %s\n", name); - if (!name || !new_ndev) { - MON_PRINT("invalid parameters\n"); - ret = -EINVAL; - goto out; - } - - /* - * Find a vacancy - */ - for (i = 0; i < DHD_MAX_IFS; i++) - if (g_monitor.mon_if[i].mon_ndev == NULL) { - idx = i; - break; - } - if (idx == -1) { - MON_PRINT("exceeds maximum interfaces\n"); - ret = -EFAULT; - goto out; - } - - ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); - if (!ndev) { - MON_PRINT("failed to allocate memory\n"); - ret = -ENOMEM; - goto out; - } - - ndev->type = ARPHRD_IEEE80211_RADIOTAP; - strncpy(ndev->name, name, IFNAMSIZ); - ndev->name[IFNAMSIZ - 1] = 0; - ndev->netdev_ops = &dhd_mon_if_ops; - - ret = register_netdevice(ndev); - if (ret) { - MON_PRINT(" register_netdevice failed (%d)\n", ret); - goto out; - } - - *new_ndev = ndev; - g_monitor.mon_if[idx].radiotap_enabled = TRUE; - g_monitor.mon_if[idx].mon_ndev = ndev; - g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); - dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); - *dhd_mon = &g_monitor; - g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; - MON_PRINT("net device returned: 0x%p\n", ndev); - MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); - -out: - if (ret && ndev) - free_netdev(ndev); - - mutex_unlock(&g_monitor.lock); - return ret; - -} - -int 
dhd_del_monitor(struct net_device *ndev) -{ - int i; - bool rollback_lock = false; - if (!ndev) - return -EINVAL; - mutex_lock(&g_monitor.lock); - for (i = 0; i < DHD_MAX_IFS; i++) { - if (g_monitor.mon_if[i].mon_ndev == ndev || - g_monitor.mon_if[i].real_ndev == ndev) { - g_monitor.mon_if[i].real_ndev = NULL; - if (rtnl_is_locked()) { - rtnl_unlock(); - rollback_lock = true; - } - unregister_netdev(g_monitor.mon_if[i].mon_ndev); - free_netdev(g_monitor.mon_if[i].mon_ndev); - g_monitor.mon_if[i].mon_ndev = NULL; - g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; - break; - } - } - if (rollback_lock) { - rtnl_lock(); - rollback_lock = false; - } - - if (g_monitor.monitor_state != - MONITOR_STATE_INTERFACE_DELETED) - MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n", - ndev); - mutex_unlock(&g_monitor.lock); - - return 0; -} - -int dhd_monitor_init(void *dhd_pub) -{ - if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { - g_monitor.dhd_pub = dhd_pub; - mutex_init(&g_monitor.lock); - g_monitor.monitor_state = MONITOR_STATE_INIT; - } - return 0; -} - -int dhd_monitor_uninit(void) -{ - int i; - struct net_device *ndev; - bool rollback_lock = false; - mutex_lock(&g_monitor.lock); - if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { - for (i = 0; i < DHD_MAX_IFS; i++) { - ndev = g_monitor.mon_if[i].mon_ndev; - if (ndev) { - if (rtnl_is_locked()) { - rtnl_unlock(); - rollback_lock = true; - } - unregister_netdev(ndev); - free_netdev(ndev); - g_monitor.mon_if[i].real_ndev = NULL; - g_monitor.mon_if[i].mon_ndev = NULL; - if (rollback_lock) { - rtnl_lock(); - rollback_lock = false; - } - } - } - g_monitor.monitor_state = MONITOR_STATE_DEINIT; - } - mutex_unlock(&g_monitor.lock); - return 0; -} diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c index e578d702ce1..9ad997098b7 100644 --- a/drivers/net/wireless/bcmdhd/dhd_sdio.c +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -21,7 +21,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* - * $Id: dhd_sdio.c 326662 2012-04-10 06:38:08Z $ + * $Id: dhd_sdio.c 352730 2012-08-23 20:55:11Z $ */ #include @@ -620,6 +620,7 @@ dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok) if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n", __FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl)); + dhd_os_send_hang_message(bus->dhd); return BCME_ERROR; } @@ -1375,7 +1376,13 @@ dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen) /* Send from dpc */ bus->ctrl_frame_buf = frame; bus->ctrl_frame_len = len; - dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + if (!bus->dpc_sched) { + bus->dpc_sched = TRUE; + dhd_sched_dpc(bus->dhd); + } + if (bus->ctrl_frame_stat) { + dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat); + } if (bus->ctrl_frame_stat == FALSE) { DHD_INFO(("%s: ctrl_frame_stat == FALSE\n", __FUNCTION__)); ret = 0; @@ -1487,7 +1494,7 @@ dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen) dhd_os_sdunlock(bus->dhd); #endif /* DHD_DEBUG */ } else if (pending == TRUE) { - /* signal pending */ + /* possibly fw hangs so never responsed back */ DHD_ERROR(("%s: signal pending\n", __FUNCTION__)); return -EINTR; } else { @@ -5192,7 +5199,7 @@ dhd_bus_console_in(dhd_pub_t *dhdp, uchar *msg, uint msglen) } #endif /* DHD_DEBUG */ -#ifdef DHD_DEBUG +#if (defined DHD_DEBUG) static void dhd_dump_cis(uint fn, uint8 *cis) { @@ -5468,12 +5475,11 @@ dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva, clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) { - DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", - err, DHD_INIT_CLKCTL1, clkctl)); + DHD_ERROR(("%s: ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + __FUNCTION__, err, DHD_INIT_CLKCTL1, clkctl)); goto fail; } - #ifdef DHD_DEBUG if (DHD_INFO_ON()) { uint fn, numfn; diff --git a/drivers/net/wireless/bcmdhd/include/bcmutils.h b/drivers/net/wireless/bcmdhd/include/bcmutils.h index 6849c26da83..a570fa2789f 100644 --- a/drivers/net/wireless/bcmdhd/include/bcmutils.h +++ b/drivers/net/wireless/bcmdhd/include/bcmutils.h @@ -603,6 +603,8 @@ extern void *_bcmutils_dummy_fn; #define CRC32_INIT_VALUE 0xffffffff #define CRC32_GOOD_VALUE 0xdebb20e3 +#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5] typedef struct bcm_bit_desc { uint32 bit; diff --git a/drivers/net/wireless/bcmdhd/include/epivers.h b/drivers/net/wireless/bcmdhd/include/epivers.h index 5df25c16b7d..fac87f500d1 100644 --- a/drivers/net/wireless/bcmdhd/include/epivers.h +++ b/drivers/net/wireless/bcmdhd/include/epivers.h @@ -33,17 +33,17 @@ #define EPI_RC_NUMBER 195 -#define EPI_INCREMENTAL_NUMBER 75 +#define EPI_INCREMENTAL_NUMBER 114 #define EPI_BUILD_NUMBER 0 -#define EPI_VERSION 5, 90, 195, 75 +#define EPI_VERSION 5, 90, 195, 114 -#define EPI_VERSION_NUM 0x055ac34b +#define EPI_VERSION_NUM 0x055ac372 #define EPI_VERSION_DEV 5.90.195 -#define EPI_VERSION_STR "5.90.195.75" +#define EPI_VERSION_STR "5.90.195.114" #endif diff --git a/drivers/net/wireless/bcmdhd/include/sbchipc.h b/drivers/net/wireless/bcmdhd/include/sbchipc.h index aa4df443e13..78ced30c502 100644 --- a/drivers/net/wireless/bcmdhd/include/sbchipc.h +++ b/drivers/net/wireless/bcmdhd/include/sbchipc.h @@ -5,7 +5,7 @@ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, * GPIO interface, extbus, and support for serial and parallel flashes. 
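
The bcmutils.h hunk above introduces MACDBG and MAC2STRDBG so call sites can print a MAC address with a single format/argument pair instead of spelling out six %02x conversions. A small usage sketch with the macros as defined there and an arbitrary example address; the driver itself would route this through its own DHD_ERROR/WL_ERR wrappers rather than plain printf():

#include <stdio.h>

/* Helpers as introduced in bcmutils.h: a format string plus an
 * argument-expanding macro that supplies the six octets. */
#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]

int main(void)
{
    const unsigned char mac[6] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };

    printf("bssid " MACDBG "\n", MAC2STRDBG(mac));
    return 0;
}
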
* - * $Id: sbchipc.h 333924 2012-05-18 04:48:52Z $ + * $Id: sbchipc.h 343982 2012-07-11 00:29:37Z $ * * Copyright (C) 1999-2011, Broadcom Corporation * @@ -1726,6 +1726,11 @@ typedef volatile struct { #define SECI_MODE_SHIFT 4 #define SECI_UPD_SECI (1 << 7) +#define SECI_SLIP_ESC_CHAR 0xDB +#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR +#define SECI_SIGNOFF_1 0 +#define SECI_REFRESH_REQ 0xDA + #define CLKCTL_STS_SECI_CLK_REQ (1 << 8) #define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) diff --git a/drivers/net/wireless/bcmdhd/include/sdioh.h b/drivers/net/wireless/bcmdhd/include/sdioh.h index 5f47d6f88ce..1d820d1569e 100644 --- a/drivers/net/wireless/bcmdhd/include/sdioh.h +++ b/drivers/net/wireless/bcmdhd/include/sdioh.h @@ -22,7 +22,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: sdioh.h 300017 2011-12-01 20:30:27Z $ + * $Id: sdioh.h 345478 2012-07-18 06:45:15Z $ */ #ifndef _SDIOH_H @@ -90,6 +90,10 @@ #define SD3_PresetVal_SDR50 0x06a #define SD3_PresetVal_SDR104 0x06c #define SD3_PresetVal_DDR50 0x06e +/* SDIO3.0 Revx specific Registers */ +#define SD3_Tuning_Info_Register 0x0EC +#define SD3_WL_BT_reset_register 0x0F0 + /* preset value indices */ #define SD3_PRESETVAL_INITIAL_IX 0 diff --git a/drivers/net/wireless/bcmdhd/include/wlioctl.h b/drivers/net/wireless/bcmdhd/include/wlioctl.h index e543bfa6d17..2038e202a46 100644 --- a/drivers/net/wireless/bcmdhd/include/wlioctl.h +++ b/drivers/net/wireless/bcmdhd/include/wlioctl.h @@ -24,7 +24,7 @@ * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * - * $Id: wlioctl.h 331292 2012-05-04 09:04:23Z $ + * $Id: wlioctl.h 353331 2012-08-27 06:04:47Z $ */ @@ -83,44 +83,6 @@ typedef struct wl_af_params { #include - - - -#define LEGACY2_WL_BSS_INFO_VERSION 108 - - -typedef struct wl_bss_info_108 { - uint32 version; - uint32 length; - struct ether_addr BSSID; - uint16 beacon_period; - uint16 capability; - uint8 SSID_len; - uint8 SSID[32]; - struct { - uint count; - uint8 rates[16]; - } rateset; - chanspec_t chanspec; - uint16 atim_window; - uint8 dtim_period; - int16 RSSI; - int8 phy_noise; - - uint8 n_cap; - uint32 nbss_cap; - uint8 ctl_ch; - uint32 reserved32[1]; - uint8 flags; - uint8 reserved[3]; - uint8 basic_mcs[MCSSET_LEN]; - - uint16 ie_offset; - uint32 ie_length; - - -} wl_bss_info_108_t; - #define WL_BSS_INFO_VERSION 109 @@ -157,30 +119,19 @@ typedef struct wl_bss_info { } wl_bss_info_t; -typedef struct wl_bsscfg { - uint32 wsec; - uint32 WPA_auth; - uint32 wsec_index; - uint32 associated; - uint32 BSS; - uint32 phytest_on; - struct ether_addr prev_BSSID; - struct ether_addr BSSID; -} wl_bsscfg_t; -typedef struct wl_bss_config { - uint32 atim_window; - uint32 beacon_period; - uint32 chanspec; -} wl_bss_config_t; +#define WL_BSS_FLAGS_FROM_BEACON 0x01 +#define WL_BSS_FLAGS_FROM_CACHE 0x02 +#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 + +#define VHT_BI_SGI_80MHZ 0x00000100 typedef struct wlc_ssid { uint32 SSID_len; uchar SSID[32]; } wlc_ssid_t; -#define WL_BSS_FLAGS_FROM_BEACON 0x01 #define WL_BSSTYPE_INFRA 1 #define WL_BSSTYPE_INDEP 0 diff --git a/drivers/net/wireless/bcmdhd/wl_android.c b/drivers/net/wireless/bcmdhd/wl_android.c index 4fcdcd32a3f..ce74f0f9461 100644 --- a/drivers/net/wireless/bcmdhd/wl_android.c +++ b/drivers/net/wireless/bcmdhd/wl_android.c @@ -736,7 +736,7 @@ int wifi_set_power(int on, unsigned long msec) #if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) int wifi_get_mac_addr(unsigned char *buf) { - DHD_ERROR(("%s\n", __FUNCTION__)); + DHD_TRACE(("%s\n", __FUNCTION__)); if (!buf) return -EINVAL; if (wifi_control_data && wifi_control_data->get_mac_addr) { diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.c b/drivers/net/wireless/bcmdhd/wl_cfg80211.c index 2365bdd4a54..b28dd59ea57 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.c +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.c @@ -188,6 +188,8 @@ static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx); static s32 wl_cfg80211_resume(struct wiphy *wiphy); +static s32 wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + struct net_device *dev, u64 cookie); #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow); #else @@ -329,7 +331,6 @@ static __used bool wl_is_ibssstarter(struct wl_priv *wl); */ static s32 __wl_cfg80211_up(struct wl_priv *wl); static s32 __wl_cfg80211_down(struct wl_priv *wl); -static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e); static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev); static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e); @@ -358,6 +359,7 @@ static s32 wl_iscan_done(struct wl_priv *wl); static s32 wl_iscan_pending(struct wl_priv *wl); static s32 wl_iscan_inprogress(struct wl_priv *wl); static s32 wl_iscan_aborted(struct wl_priv *wl); +static void wl_scan_timeout_process(struct work_struct *work); /* * find most significant bit set @@ -951,7 +953,7 @@ wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, struct net_device *dev) struct net_device *ndev = wl_to_prmry_ndev(wl); WL_ERR(("Firmware returned an error (%d) from p2p_ifdel" "HANG Notification sent to %s\n", ret, ndev->name)); - wl_cfg80211_hang(ndev, WLAN_REASON_UNSPECIFIED); + wl_cfg80211_hang(ndev, WLAN_REASON_DRIVER_ERROR); } /* Wait for any pending scan req to get aborted from the sysioc context */ @@ -1518,6 +1520,8 @@ wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, s32 passive_scan; wl_scan_results_t *results; WL_SCAN(("Enter \n")); + + mutex_lock(&wl->usr_sync); wl->escan_info.ndev = ndev; wl->escan_info.wiphy = wiphy; wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING; @@ -1526,7 +1530,7 @@ wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, &passive_scan, sizeof(passive_scan), false); if (unlikely(err)) { WL_ERR(("error (%d)\n", err)); - return err; + goto exit; } results = (wl_scan_results_t *) wl->escan_info.escan_buf; results->version = 0; @@ -1534,6 +1538,8 @@ wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev, results->buflen = WL_SCAN_RESULTS_FIXED_SIZE; err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START); +exit: + mutex_unlock(&wl->usr_sync); return err; } @@ -1580,7 +1586,6 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, /* Arm scan timeout timer */ mod_timer(&wl->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS)); iscan_req = false; - wl->scan_request = request; if (request) { /* scan bss */ ssids = request->ssids; if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) { @@ -1661,6 +1666,7 @@ __wl_cfg80211_scan(struct wiphy *wiphy, 
struct net_device *ndev, /* we don't do iscan in ibss */ ssids = this_ssid; } + wl->scan_request = request; wl_set_drv_status(wl, SCANNING, ndev); if (iscan_req) { err = wl_do_iscan(wl, request); @@ -1724,12 +1730,9 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, scan_out: wl_clr_drv_status(wl, SCANNING, ndev); - if (wl->scan_request) { - if (timer_pending(&wl->scan_timeout)) - del_timer_sync(&wl->scan_timeout); - cfg80211_scan_done(wl->scan_request, true); - wl->scan_request = NULL; - } + if (timer_pending(&wl->scan_timeout)) + del_timer_sync(&wl->scan_timeout); + wl->scan_request = NULL; return err; } @@ -1750,6 +1753,7 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, wl->scan_busy_count++; if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { wl->scan_busy_count = 0; + WL_ERR(("Continuous scan failures!! Exercising FW hang recovery\n")); net_os_send_hang_message(ndev); } } @@ -1862,6 +1866,7 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, /* Clean BSSID */ bzero(&bssid, sizeof(bssid)); wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + wl_update_prof(wl, dev, NULL, params->bssid, WL_PROF_PENDING_BSSID); if (params->ssid) WL_INFO(("SSID: %s\n", params->ssid)); @@ -2333,6 +2338,7 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, /* Clean BSSID */ bzero(&bssid, sizeof(bssid)); wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID); + wl_update_prof(wl, dev, NULL, sme->bssid, WL_PROF_PENDING_BSSID); if (IS_P2P_SSID(sme->ssid) && (dev != wl_to_prmry_ndev(wl))) { /* we only allow to connect using virtual interface in case of P2P */ @@ -2660,7 +2666,7 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, WL_ERR(("WLC_GET_WSEC error (%d)\n", err)); return err; } - if (wsec & WEP_ENABLED) { + if (wsec == WEP_ENABLED) { /* Just select a new current key */ index = (u32) key_idx; index = htod32(index); @@ -2780,7 +2786,9 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, bssidx = wl_cfgp2p_find_idx(wl, dev); - if (mac_addr) { + if (mac_addr && + ((params->cipher != WLAN_CIPHER_SUITE_WEP40) && + (params->cipher != WLAN_CIPHER_SUITE_WEP104))) { wl_add_keyext(wiphy, dev, key_idx, mac_addr, params); goto exit; } @@ -2999,6 +3007,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, sta->idle * 1000)); #endif } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS) { + get_pktcnt_t pktcnt; u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID); err = -ENODEV; if (!wl_get_drv_status(wl, CONNECTED, dev) || @@ -3035,6 +3044,19 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, sinfo->signal = rssi; WL_DBG(("RSSI %d dBm\n", rssi)); + err = wldev_ioctl(dev, WLC_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt), false); + if (!err) { + sinfo->filled |= (STATION_INFO_RX_PACKETS | + STATION_INFO_RX_DROP_MISC | + STATION_INFO_TX_PACKETS | + STATION_INFO_TX_FAILED); + sinfo->rx_packets = pktcnt.rx_good_pkt; + sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt; + sinfo->tx_packets = pktcnt.tx_good_pkt; + sinfo->tx_failed = pktcnt.tx_bad_pkt; + } + get_station_err: if (err && (err != -ETIMEDOUT) && (err != -EIO)) { /* Disconnect due to zero BSSID or error to get RSSI */ @@ -3048,6 +3070,23 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, return err; } +int wl_cfg80211_update_power_mode(struct net_device *dev) +{ + int pm = -1; + int err; + + err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), false); + if (err || (pm == -1)) { + WL_ERR(("error 
(%d)\n", err)); + } else { + pm = (pm == PM_OFF) ? false : true; + WL_DBG(("%s: %d\n", __func__, pm)); + if (dev->ieee80211_ptr) + dev->ieee80211_ptr->ps = pm; + } + return err; +} + static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, s32 timeout) @@ -3055,7 +3094,9 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, s32 pm; s32 err = 0; struct wl_priv *wl = wiphy_priv(wiphy); - +#if !defined(SUPPORT_PM2_ONLY) + dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub); +#endif CHECK_SYS_UP(wl); WL_DBG(("Enter : power save %s\n", (enabled ? "enable" : "disable"))); @@ -3063,7 +3104,11 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, return err; } +#if !defined(SUPPORT_PM2_ONLY) + pm = enabled ? ((dhd->in_suspend) ? PM_MAX : PM_FAST) : PM_OFF; +#else pm = enabled ? PM_FAST : PM_OFF; +#endif pm = htod32(pm); err = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), true); if (unlikely(err)) { @@ -3497,6 +3542,7 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev, wifi_p2p_ie_t *p2p_ie; wpa_ie_fixed_t *wps_ie; scb_val_t scb_val; + wifi_wfd_ie_t *wfd_ie; const struct ieee80211_mgmt *mgmt; struct wl_priv *wl = wiphy_priv(wiphy); struct net_device *dev = NULL; @@ -3504,6 +3550,7 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev, s32 bssidx = 0; u32 p2pie_len = 0; u32 wpsie_len = 0; + u32 wfdie_len = 0; u32 id; u32 retry = 0; bool ack = false; @@ -3553,6 +3600,11 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev, /* Total length of P2P Information Element */ p2pie_len = p2p_ie->len + sizeof(p2p_ie->len) + sizeof(p2p_ie->id); } + if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)(buf + ie_offset), ie_len)) + != NULL) { + /* Total length of WFD Information Element */ + wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id); + } if ((wps_ie = wl_cfgp2p_find_wpsie((u8 *)(buf + ie_offset), ie_len)) != NULL) { /* Order of Vendor IE is 1) WPS IE + @@ -3564,7 +3616,7 @@ wl_cfg80211_mgmt_tx(struct wiphy *wiphy, struct net_device *ndev, sizeof(wps_ie->tag); wl_cfgp2p_set_management_ie(wl, dev, bssidx, VNDR_IE_PRBRSP_FLAG, - (u8 *)wps_ie, wpsie_len + p2pie_len); + (u8 *)wps_ie, wpsie_len + p2pie_len+ wfdie_len); } cfg80211_mgmt_tx_status(ndev, *cookie, buf, len, true, GFP_KERNEL); goto exit; @@ -4073,11 +4125,13 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, wpa_ie_fixed_t *wpa_ie; bcm_tlv_t *wpa2_ie; wifi_p2p_ie_t *p2p_ie; + wifi_wfd_ie_t *wfd_ie; bool is_bssup = false; bool update_bss = false; bool pbc = false; u16 wpsie_len = 0; u16 p2pie_len = 0; + u32 wfdie_len = 0; u8 beacon_ie[IE_MAX_LEN]; s32 ie_offset = 0; s32 bssidx = 0; @@ -4135,10 +4189,24 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, } else { WL_ERR(("No P2PIE in beacon \n")); } + /* find the WFD IEs */ + if ((wfd_ie = wl_cfgp2p_find_wfdie((u8 *)info->tail, info->tail_len)) != NULL) { + /* Total length of P2P Information Element */ + wfdie_len = wfd_ie->len + sizeof(wfd_ie->len) + sizeof(wfd_ie->id); + if ((wpsie_len + p2pie_len + wfdie_len) < IE_MAX_LEN) { + memcpy(&beacon_ie[wpsie_len + p2pie_len], wfd_ie, wfdie_len); + } else { + WL_ERR(("Found WFD IE but there is no space, (%d)(%d)(%d)\n", + wpsie_len, p2pie_len, wfdie_len)); + wfdie_len = 0; + } + } else { + WL_ERR(("No WFDIE in beacon \n")); + } /* add WLC_E_PROBREQ_MSG event to respose probe_request from STA */ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, pbc); wl_cfgp2p_set_management_ie(wl, dev, bssidx, 
VNDR_IE_BEACON_FLAG, - beacon_ie, wpsie_len + p2pie_len); + beacon_ie, wpsie_len + p2pie_len + wfdie_len); /* find the RSN_IE */ if ((wpa2_ie = bcm_parse_tlvs((u8 *)info->tail, info->tail_len, @@ -4408,8 +4476,8 @@ wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev, } #ifdef WL_SCHED_SCAN -#define PNO_TIME 30 -#define PNO_REPEAT 4 +#define PNO_TIME 30 +#define PNO_REPEAT 4 #define PNO_FREQ_EXPO_MAX 2 int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, @@ -4425,9 +4493,11 @@ int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, int i; int ret = 0; - WL_DBG(("Enter n_match_sets:%d n_ssids:%d \n", + WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN START\n")); + WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n", request->n_match_sets, request->n_ssids)); - WL_DBG(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n", + WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n", request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max)); #if defined(WL_ENABLE_P2P_IF) @@ -4450,7 +4520,7 @@ int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, ssid = &request->match_sets[i].ssid; memcpy(ssids_local[i].SSID, ssid->ssid, ssid->ssid_len); ssids_local[i].SSID_len = ssid->ssid_len; - WL_DBG((">>> PNO filter set for ssid (%s) \n", ssid->ssid)); + WL_PNO((">>> PNO filter set for ssid (%s) \n", ssid->ssid)); ssid_count++; } } @@ -4458,7 +4528,7 @@ int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, if (request->n_ssids > 0) { for (i = 0; i < request->n_ssids; i++) { /* Active scan req for ssids */ - WL_DBG((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid)); + WL_PNO((">>> Active scan req for ssid (%s) \n", request->ssids[i].ssid)); /* match_set ssids is a supert set of n_ssid list, so we need * not add these set seperately @@ -4491,6 +4561,7 @@ int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev) struct wl_priv *wl = wiphy_priv(wiphy); WL_DBG(("Enter \n")); + WL_PNO((">>> SCHED SCAN STOP\n")); if (dhd_dev_pno_enable(dev, 0) < 0) WL_ERR(("PNO disable failed")); @@ -4499,6 +4570,7 @@ int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev) WL_ERR(("PNO reset failed")); if (wl->scan_request && wl->sched_scan_running) { + WL_PNO((">>> Sched scan running. Aborting it..\n")); wl_notify_escan_complete(wl, dev, true, true); } @@ -4541,6 +4613,7 @@ static struct cfg80211_ops wl_cfg80211_ops = { .set_channel = wl_cfg80211_set_channel, .set_beacon = wl_cfg80211_add_set_beacon, .add_beacon = wl_cfg80211_add_set_beacon, + .mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait, #ifdef WL_SCHED_SCAN .sched_scan_start = wl_cfg80211_sched_scan_start, .sched_scan_stop = wl_cfg80211_sched_scan_stop, @@ -4609,8 +4682,10 @@ static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev #endif WIPHY_FLAG_4ADDR_STATION; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) - wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; + /* wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; */ #endif + /* AP_SME flag can be advertised to remove patch from wpa_supplicant */ + wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME; WL_DBG(("Registering custom regulatory)\n")); wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom); @@ -4700,8 +4775,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi) } notif_bss_info->rssi = dtoh16(bi->RSSI); memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN); - mgmt_type = wl->active_scan ? 
- IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON; + mgmt_type = (bi->flags & WL_BSS_FLAGS_FROM_BEACON) ? + IEEE80211_STYPE_BEACON : IEEE80211_STYPE_PROBE_RESP; + if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) { mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type); } @@ -4730,7 +4806,7 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi) return -EINVAL; } - WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM " + WL_DBG(("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM" "mgmt_type %d frame_len %d\n", bi->SSID, notif_bss_info->rssi, notif_bss_info->channel, mgmt->u.beacon.capab_info, &bi->BSSID, mgmt_type, @@ -4739,11 +4815,11 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi) signal = notif_bss_info->rssi * 100; if (!mgmt->u.probe_resp.timestamp) { - struct timeval tv; + struct timespec ts; - do_gettimeofday(&tv); - mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec * 1000000) - + tv.tv_usec; + get_monotonic_boottime(&ts); + mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec * 1000000) + + ts.tv_nsec / 1000; } cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt, @@ -5078,7 +5154,8 @@ wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev, WL_DBG(("wl_ibss_join_done succeeded\n")); } else { if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) { - printk("wl_bss_connect_done succeeded\n"); + printk("wl_bss_connect_done succeeded with " MACDBG "\n", + MAC2STRDBG((u8*)(&e->addr))); wl_bss_connect_done(wl, ndev, e, data, true); WL_DBG(("joined in BSS network \"%s\"\n", ((struct wlc_ssid *) @@ -5127,8 +5204,8 @@ wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev, wl_clr_drv_status(wl, DISCONNECTING, ndev); } else if (wl_is_nonetwork(wl, e)) { - printk("connect failed event=%d e->status 0x%x\n", - event, (int)ntoh32(e->status)); + printk("connect failed event=%d e->status %d e->reason %d\n", + event, (int)ntoh32(e->status), (int)ntoh32(e->reason)); /* Clean up any pending scan request */ if (wl->scan_request) { if (wl->escan_on) { @@ -5388,9 +5465,17 @@ wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev, u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID); WL_DBG((" enter\n")); + if (wl->scan_request) { wl_notify_escan_complete(wl, ndev, true, true); } + if (is_zero_ether_addr(curbssid)) { + curbssid = wl_read_prof(wl, ndev, WL_PROF_PENDING_BSSID); + if (is_zero_ether_addr(curbssid)) { + WL_ERR(("Invalid BSSID\n")); + curbssid = NULL; + } + } if (wl_get_drv_status(wl, CONNECTING, ndev)) { wl_clr_drv_status(wl, CONNECTING, ndev); if (completed) { @@ -5407,7 +5492,9 @@ wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, - completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT, + completed ? WLAN_STATUS_SUCCESS : + (e->reason) ? 
ntoh32(e->reason) : + WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); if (completed) WL_INFO(("Report connect result - connection succeeded\n")); @@ -5471,19 +5558,19 @@ static s32 wl_notify_pfn_status(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { - WL_ERR((" PNO Event\n")); + WL_ERR((">>> PNO Event\n")); - mutex_lock(&wl->usr_sync); #ifndef WL_SCHED_SCAN + mutex_lock(&wl->usr_sync); /* TODO: Use cfg80211_sched_scan_results(wiphy); */ cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); + mutex_unlock(&wl->usr_sync); #else /* If cfg80211 scheduled scan is supported, report the pno results via sched * scan results */ wl_notify_sched_scan_results(wl, ndev, e, data); #endif /* WL_SCHED_SCAN */ - mutex_unlock(&wl->usr_sync); return 0; } #endif /* PNO_SUPPORT */ @@ -5540,14 +5627,15 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev, del_timer_sync(&wl->scan_timeout); spin_lock_irqsave(&wl->cfgdrv_lock, flags); if (wl->scan_request) { - WL_DBG(("cfg80211_scan_done\n")); cfg80211_scan_done(wl->scan_request, false); wl->scan_request = NULL; } spin_unlock_irqrestore(&wl->cfgdrv_lock, flags); + WL_DBG(("cfg80211_scan_done\n")); mutex_unlock(&wl->usr_sync); return err; } + static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da, const struct ether_addr *sa, const struct ether_addr *bssid, @@ -5710,7 +5798,7 @@ wl_notify_rx_mgmt_frame(struct wl_priv *wl, struct net_device *ndev, /* If target scan is not reliable, set the below define to "1" to do a * full escan */ -#define FULL_ESCAN_ON_PFN_NET_FOUND 0 +#define FULL_ESCAN_ON_PFN_NET_FOUND 1 static s32 wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) @@ -5728,10 +5816,10 @@ wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, WL_DBG(("Enter\n")); if (e->event_type == WLC_E_PFN_NET_LOST) { - WL_DBG(("PFN NET LOST event. Do Nothing \n")); + WL_PNO(("PFN NET LOST event. Do Nothing \n")); return 0; } - WL_DBG(("PFN NET FOUND event. count:%d \n", pfn_result->count)); + WL_PNO((">>> PFN NET FOUND event. count:%d \n", pfn_result->count)); if (pfn_result->count > 0) { int i; @@ -5757,7 +5845,7 @@ wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, err = -EINVAL; goto out_err; } - WL_DBG(("SSID:%s Channel:%d \n", + WL_PNO((">>> SSID:%s Channel:%d \n", netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.channel)); /* PFN result doesn't have all the info which are required by the supplicant * (For e.g IEs) Do a target Escan so that sched scan results are reported @@ -5790,6 +5878,7 @@ wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, } if (wl_get_p2p_status(wl, DISCOVERY_ON)) { + WL_PNO((">>> P2P discovery was ON. 
Disabling it\n")); err = wl_cfgp2p_discover_enable_search(wl, false); if (unlikely(err)) { wl_clr_drv_status(wl, SCANNING, ndev); @@ -5799,8 +5888,10 @@ wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev, wl_set_drv_status(wl, SCANNING, ndev); #if FULL_ESCAN_ON_PFN_NET_FOUND + WL_PNO((">>> Doing Full ESCAN on PNO event\n")); err = wl_do_escan(wl, wiphy, ndev, NULL); #else + WL_PNO((">>> Doing targeted ESCAN on PNO event\n")); err = wl_do_escan(wl, wiphy, ndev, &request); #endif if (err) { @@ -6004,7 +6095,9 @@ static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted) unsigned long flags; WL_DBG(("Enter \n")); - wl->scan_busy_count = 0; + if(!aborted) + wl->scan_busy_count = 0; + if (!wl_get_drv_status(wl, SCANNING, ndev)) { wl_clr_drv_status(wl, SCANNING, ndev); WL_ERR(("Scan complete while device not scanning\n")); @@ -6161,13 +6254,30 @@ static void wl_scan_timeout(unsigned long data) { struct wl_priv *wl = (struct wl_priv *)data; + schedule_work(&wl->work_scan_timeout); +} + +static void wl_scan_timeout_process(struct work_struct *work) +{ + struct wl_priv *wl; + + wl = (wl_priv_t *)container_of(work, wl_priv_t, work_scan_timeout); + if (wl->scan_request) { WL_ERR(("timer expired\n")); if (wl->escan_on) - wl_notify_escan_complete(wl, wl->escan_info.ndev, true, false); + wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true); else wl_notify_iscan_complete(wl_to_iscan(wl), true); } + + /* Assume FW is in bad state if there are continuous scan timeouts */ + wl->scan_busy_count++; + if (wl->scan_busy_count > WL_SCAN_BUSY_MAX) { + wl->scan_busy_count = 0; + WL_ERR(("Continuous scan timeouts!! Exercising FW hang recovery\n")); + net_os_send_hang_message(wl->escan_info.ndev); + } } static void wl_iscan_timer(unsigned long data) @@ -6259,7 +6369,9 @@ static s32 wl_notify_escan_complete(struct wl_priv *wl, WL_DBG(("Enter \n")); - wl->scan_busy_count = 0; + if(!aborted) + wl->scan_busy_count = 0; + if (wl->scan_request) { if (wl->scan_request->dev == wl->p2p_net) dev = wl_to_prmry_ndev(wl); @@ -6267,9 +6379,9 @@ static s32 wl_notify_escan_complete(struct wl_priv *wl, dev = wl->scan_request->dev; } else { - WL_ERR(("wl->scan_request is NULL may be internal scan." - "doing scan_abort for ndev %p primary %p p2p_net %p", - ndev, wl_to_prmry_ndev(wl), wl->p2p_net)); + WL_DBG(("wl->scan_request is NULL may be internal scan." 
+ "doing scan_abort for ndev %p primary %p", + ndev, wl_to_prmry_ndev(wl))); dev = ndev; } if (fw_abort && !in_atomic()) { @@ -6289,19 +6401,15 @@ static s32 wl_notify_escan_complete(struct wl_priv *wl, if (timer_pending(&wl->scan_timeout)) del_timer_sync(&wl->scan_timeout); spin_lock_irqsave(&wl->cfgdrv_lock, flags); - #ifdef WL_SCHED_SCAN if (wl->sched_scan_req && !wl->scan_request) { - WL_DBG((" REPORTING SCHED SCAN RESULTS \n")); - if (aborted) - cfg80211_sched_scan_stopped(wl->sched_scan_req->wiphy); - else + WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n")); + if (!aborted) cfg80211_sched_scan_results(wl->sched_scan_req->wiphy); wl->sched_scan_running = FALSE; wl->sched_scan_req = NULL; } #endif /* WL_SCHED_SCAN */ - if (likely(wl->scan_request)) { cfg80211_scan_done(wl->scan_request, aborted); wl->scan_request = NULL; @@ -6326,12 +6434,15 @@ static s32 wl_escan_handler(struct wl_priv *wl, wl_escan_result_t *escan_result; wl_bss_info_t *bss = NULL; wl_scan_results_t *list; + wifi_p2p_ie_t * p2p_ie; u32 bi_length; u32 i; - wifi_p2p_ie_t * p2p_ie; u8 *p2p_dev_addr = NULL; + WL_DBG((" enter event type : %d, status : %d \n", ntoh32(e->event_type), ntoh32(e->status))); + + mutex_lock(&wl->usr_sync); /* P2P SCAN is coming from primary interface */ if (wl_get_p2p_status(wl, SCANNING)) { if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) @@ -6341,15 +6452,18 @@ static s32 wl_escan_handler(struct wl_priv *wl, } if (!ndev || !wl->escan_on || - !wl_get_drv_status(wl, SCANNING, ndev)) { - WL_ERR(("escan is not ready ndev %p wl->escan_on %d drv_status 0x%x\n", - ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev))); - return err; + (!wl_get_drv_status(wl, SCANNING, ndev) && + !wl->sched_scan_running)) { + WL_ERR(("escan is not ready ndev %p wl->escan_on %d" + " drv_status 0x%x e_type %d e_states %d\n", + ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev), + ntoh32(e->event_type), ntoh32(e->status))); + goto exit; } + escan_result = (wl_escan_result_t *)data; if (status == WLC_E_STATUS_PARTIAL) { WL_INFO(("WLC_E_STATUS_PARTIAL \n")); - escan_result = (wl_escan_result_t *) data; if (!escan_result) { WL_ERR(("Invalid escan result (NULL pointer)\n")); goto exit; @@ -6434,7 +6548,6 @@ static s32 wl_escan_handler(struct wl_priv *wl, bss->RSSI = bi->RSSI; bss->flags |= WLC_BSS_RSSI_ON_CHANNEL; } - goto exit; } } @@ -6442,7 +6555,6 @@ static s32 wl_escan_handler(struct wl_priv *wl, list->version = dtoh32(bi->version); list->buflen += bi_length; list->count++; - } } @@ -6455,12 +6567,10 @@ static s32 wl_escan_handler(struct wl_priv *wl, if (wl->afx_hdl->peer_chan == WL_INVALID) complete(&wl->act_frm_scan); } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { - mutex_lock(&wl->usr_sync); WL_INFO(("ESCAN COMPLETED\n")); wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; wl_inform_bss(wl); wl_notify_escan_complete(wl, ndev, false, false); - mutex_unlock(&wl->usr_sync); } } else if (status == WLC_E_STATUS_ABORT) { @@ -6472,14 +6582,15 @@ static s32 wl_escan_handler(struct wl_priv *wl, if (wl->afx_hdl->peer_chan == WL_INVALID) complete(&wl->act_frm_scan); } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { - mutex_lock(&wl->usr_sync); WL_INFO(("ESCAN ABORTED\n")); wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; wl_inform_bss(wl); wl_notify_escan_complete(wl, ndev, true, false); - mutex_unlock(&wl->usr_sync); } } + else if (status == WLC_E_STATUS_NEWSCAN) { + /* Do Nothing. 
Ignore this event */ + } else { WL_ERR(("unexpected Escan Event %d : abort\n", status)); wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE; @@ -6490,14 +6601,13 @@ static s32 wl_escan_handler(struct wl_priv *wl, if (wl->afx_hdl->peer_chan == WL_INVALID) complete(&wl->act_frm_scan); } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) { - mutex_lock(&wl->usr_sync); wl->bss_list = (wl_scan_results_t *)wl->escan_info.escan_buf; wl_inform_bss(wl); wl_notify_escan_complete(wl, ndev, true, false); - mutex_unlock(&wl->usr_sync); } } exit: + mutex_unlock(&wl->usr_sync); return err; } @@ -6560,6 +6670,7 @@ static s32 wl_init_priv(struct wl_priv *wl) return -ENOMEM; wl_init_event_handler(wl); mutex_init(&wl->usr_sync); + INIT_WORK(&wl->work_scan_timeout, wl_scan_timeout_process); err = wl_init_scan(wl); if (err) return err; @@ -6579,6 +6690,7 @@ static void wl_deinit_priv(struct wl_priv *wl) wl_link_down(wl); del_timer_sync(&wl->scan_timeout); wl_term_iscan(wl); + cancel_work_sync(&wl->work_scan_timeout); wl_deinit_priv_mem(wl); unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier); } @@ -6990,7 +7102,7 @@ static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 ift return 0; } -static s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) +s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add) { s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; @@ -7083,6 +7195,10 @@ static int wl_construct_reginfo(struct wl_priv *wl, s32 bw_cap) band = IEEE80211_BAND_5GHZ; ht40_allowed = (bw_cap == WLC_N_BW_20ALL) ? FALSE : TRUE; } + else { + WL_ERR(("Invalid Channel received %x\n", channel)); + continue; + } for (j = 0; (j < *n_cnt && (*n_cnt < array_size)); j++) { if (band_chan_arr[j].hw_value == channel) { @@ -7211,7 +7327,7 @@ s32 wl_update_wiphybands(struct wl_priv *wl) wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; index = IEEE80211_BAND_2GHZ; - if (bandlist[i] == WLC_BAND_2G && bw_cap == WLC_N_BW_40ALL) + if (bw_cap == WLC_N_BW_40ALL) wiphy->bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; } if ((index >= 0) && nmode) { @@ -7220,6 +7336,7 @@ s32 wl_update_wiphybands(struct wl_priv *wl) wiphy->bands[index]->ht_cap.ht_supported = TRUE; wiphy->bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; wiphy->bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + wiphy->bands[index]->ht_cap.mcs.rx_mask[0] = 0xff; } } @@ -7378,6 +7495,9 @@ static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item) case WL_PROF_BSSID: rptr = profile->bssid; break; + case WL_PROF_PENDING_BSSID: + rptr = profile->pending_bssid; + break; case WL_PROF_SSID: rptr = &profile->ssid; break; @@ -7414,6 +7534,12 @@ wl_update_prof(struct wl_priv *wl, struct net_device *ndev, else memset(profile->bssid, 0, ETHER_ADDR_LEN); break; + case WL_PROF_PENDING_BSSID: + if (data) + memcpy(profile->pending_bssid, data, ETHER_ADDR_LEN); + else + memset(profile->pending_bssid, 0, ETHER_ADDR_LEN); + break; case WL_PROF_SEC: memcpy(&profile->sec, data, sizeof(profile->sec)); break; @@ -7757,3 +7883,10 @@ void wl_cfg80211_enable_trace(int level) { wl_dbg_level |= WL_DBG_DBG; } + +static s32 +wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, + struct net_device *dev, u64 cookie) +{ + return 0; +} diff --git a/drivers/net/wireless/bcmdhd/wl_cfg80211.h b/drivers/net/wireless/bcmdhd/wl_cfg80211.h index 49bed703062..472093fdda3 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfg80211.h +++ b/drivers/net/wireless/bcmdhd/wl_cfg80211.h @@ -61,13 +61,24 
@@ struct wl_ibss; /* 0 invalidates all debug messages. default is 1 */ #define WL_DBG_LEVEL 0xFF -#define WL_ERR(args) \ +#if defined(DHD_DEBUG) +#define WL_ERR(args) \ do { \ - if (wl_dbg_level & WL_DBG_ERR) { \ + if (wl_dbg_level & WL_DBG_ERR) { \ printk(KERN_ERR "CFG80211-ERROR) %s : ", __func__); \ printk args; \ } \ } while (0) +#else /* defined(DHD_DEBUG) */ +#define WL_ERR(args) \ +do { \ + if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \ + printk(KERN_INFO "CFG80211-ERROR) %s : ", __func__); \ + printk args; \ + } \ +} while (0) +#endif /* defined(DHD_DEBUG) */ + #ifdef WL_INFO #undef WL_INFO #endif @@ -109,7 +120,7 @@ do { \ #else /* !(WL_DBG_LEVEL > 0) */ #define WL_DBG(args) #endif /* (WL_DBG_LEVEL > 0) */ - +#define WL_PNO(args) #define WL_SCAN_RETRY_MAX 3 #define WL_NUM_PMKIDS_MAX MAXPMKID @@ -142,6 +153,8 @@ do { \ #define WL_SCB_TIMEOUT 20 #endif +#define WLAN_REASON_DRIVER_ERROR WLAN_REASON_UNSPECIFIED + /* driver status */ enum wl_status { WL_STATUS_READY = 0, @@ -170,6 +183,7 @@ enum wl_prof_list { WL_PROF_IBSS, WL_PROF_BAND, WL_PROF_BSSID, + WL_PROF_PENDING_BSSID, WL_PROF_ACT, WL_PROF_BEACONINT, WL_PROF_DTIMPERIOD @@ -272,6 +286,7 @@ struct wl_profile { struct wl_security sec; struct wl_ibss ibss; u8 bssid[ETHER_ADDR_LEN]; + u8 pending_bssid[ETHER_ADDR_LEN]; u16 beacon_interval; u8 dtim_period; bool active; @@ -387,7 +402,7 @@ struct afx_hdl { }; /* private data of cfg80211 interface */ -struct wl_priv { +typedef struct wl_priv { struct wireless_dev *wdev; /* representing wl cfg80211 device */ struct wireless_dev *p2p_wdev; /* representing wl cfg80211 device for P2P */ @@ -457,7 +472,9 @@ struct wl_priv { u16 hostapd_chan; /* remember chan requested by framework for hostapd */ u16 deauth_reason; /* Place holder to save deauth/disassoc reasons */ u16 scan_busy_count; -}; + struct work_struct work_scan_timeout; +} wl_priv_t; + static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss) { @@ -671,4 +688,6 @@ int wl_cfg80211_do_driver_init(struct net_device *net); void wl_cfg80211_enable_trace(int level); extern s32 wl_update_wiphybands(struct wl_priv *wl); extern s32 wl_cfg80211_if_is_group_owner(void); +extern int wl_cfg80211_update_power_mode(struct net_device *dev); +extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add); #endif /* _wl_cfg80211_h_ */ diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c index bdcfa95307c..8fcc13c4d30 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfgp2p.c +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.c @@ -641,8 +641,8 @@ wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, } set_ie: ret = wl_cfgp2p_set_management_ie(wl, dev, - wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE), - VNDR_IE_PRBREQ_FLAG, ie, ie_len); + wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE), + VNDR_IE_PRBREQ_FLAG, ie, ie_len); if (unlikely(ret < 0)) { CFGP2P_ERR(("set probreq ie occurs error %d\n", ret)); @@ -846,6 +846,11 @@ wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev, /* Check whether the given IE looks like WFA P2P IE. */ #define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P) + /* Check whether the given IE looks like WFA WFDisplay IE. 
*/ +#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */ +#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ + (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD) + /* Delete and Set a management vndr ie to firmware * Parameters: * @wl : wl_private data @@ -970,7 +975,8 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss ie_len = ie_buf[pos++]; if ((ie_id == DOT11_MNG_VS_ID) && (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || - wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) { + wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) { CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :" "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], ie_buf[pos+1], ie_buf[pos+2])); @@ -996,7 +1002,8 @@ wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bss ie_len = ie_buf[pos++]; if ((ie_id == DOT11_MNG_VS_ID) && (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || - wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) { + wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0) || + wl_cfgp2p_is_wfd_ie(&ie_buf[pos-2], NULL, 0))) { CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :" "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], ie_buf[pos+1], ie_buf[pos+2])); @@ -1107,6 +1114,19 @@ wl_cfgp2p_find_p2pie(u8 *parse, u32 len) return NULL; } +wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len) +{ + bcm_tlv_t *ie; + + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) { + return (wifi_wfd_ie_t *)ie; + } + } + return NULL; +} + static s32 wl_cfgp2p_vndr_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag, s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete) @@ -1210,6 +1230,10 @@ wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev, } cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id, &wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL); + if (wl_add_remove_eventmsg(wl_to_prmry_ndev(wl), + WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) { + CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n")); + } } else wl_clr_p2p_status(wl, LISTEN_EXPIRED); @@ -1301,6 +1325,9 @@ wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms) } else wl_clr_p2p_status(wl, LISTEN_EXPIRED); + if (wl_add_remove_eventmsg(wl_to_prmry_ndev(wl), WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) { + CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n")); + } wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); _timer = &wl->p2p->listen_timer; @@ -1731,10 +1758,16 @@ wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int if (legacy_ps != -1) { s32 pm = legacy_ps ? 
PM_MAX : PM_OFF; +#if defined(SUPPORT_PM2_ONLY) + if (pm == PM_MAX) + pm = PM_FAST; +#endif /* SUPPORT_PM2_ONLY */ ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), WLC_SET_PM, &pm, sizeof(pm), true); if (unlikely(ret)) { CFGP2P_ERR(("error (%d)\n", ret)); + } else { + wl_cfg80211_update_power_mode(ndev); } } } @@ -1797,6 +1830,38 @@ wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id) } #define P2P_GROUP_CAPAB_GO_BIT 0x01 + +u8* +wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib) +{ + bcm_tlv_t *ie; + u8* pAttrib; + + CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len)); + while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { + if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) { + /* Have the P2p ie. Now check for attribute */ + if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) { + CFGP2P_INFO(("P2P attribute %d was found at parse %p", + attrib, parse)); + return pAttrib; + } + else { + parse += (ie->len + TLV_HDR_LEN); + len -= (ie->len + TLV_HDR_LEN); + CFGP2P_INFO(("P2P Attribute %d not found Moving parse" + " to %p len to %d", attrib, parse, len)); + } + } + else { + /* It was not p2p IE. parse will get updated automatically to next TLV */ + CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len)); + } + } + CFGP2P_ERR(("P2P attribute %d was NOT found", attrib)); + return NULL; +} + u8 * wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length) { @@ -1805,12 +1870,8 @@ wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length) bool p2p_go = 0; u8 *ptr = NULL; - if (!(p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset, bi->ie_length))) { - WL_ERR(("P2P IE not found")); - return NULL; - } - - if (!(capability = wl_cfgp2p_retreive_p2pattrib(p2p_ie, P2P_SEID_P2P_INFO))) { + if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset, + bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) { WL_ERR(("P2P Capability attribute not found")); return NULL; } diff --git a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h index 05323ed5d48..be5ddba73a4 100644 --- a/drivers/net/wireless/bcmdhd/wl_cfgp2p.h +++ b/drivers/net/wireless/bcmdhd/wl_cfgp2p.h @@ -31,6 +31,8 @@ struct wl_priv; extern u32 wl_dbg_level; +typedef struct wifi_p2p_ie wifi_wfd_ie_t; + /* Enumeration of the usages of the BSSCFGs used by the P2P Library. Do not * confuse this with a bsscfg index. This value is an index into the * saved_ie[] array of structures which in turn contains a bsscfg index field. 
@@ -192,6 +194,9 @@ wl_cfgp2p_find_wpsie(u8 *parse, u32 len); extern wifi_p2p_ie_t * wl_cfgp2p_find_p2pie(u8 *parse, u32 len); +extern wifi_wfd_ie_t * +wl_cfgp2p_find_wfdie(u8 *parse, u32 len); + extern s32 wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len); @@ -249,6 +254,9 @@ wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int extern u8 * wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id); +extern u8* +wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib); + extern u8 * wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length); diff --git a/drivers/net/wireless/bcmdhd/wldev_common.c b/drivers/net/wireless/bcmdhd/wldev_common.c index 51a80645fb3..7bea3dcfa71 100644 --- a/drivers/net/wireless/bcmdhd/wldev_common.c +++ b/drivers/net/wireless/bcmdhd/wldev_common.c @@ -334,8 +334,11 @@ int wldev_set_country( scb_val_t scbval; char smbuf[WLC_IOCTL_SMLEN]; - if (!country_code) + if (!country_code) { + WLDEV_ERROR(("%s: set country failed for %s\n", + __FUNCTION__, country_code)); return error; + } error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec), smbuf, sizeof(smbuf), NULL); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 87813c33bdc..b2707d733e9 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -2182,6 +2182,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) { int rc = 0; unsigned long flags; + unsigned long now, end; spin_lock_irqsave(&priv->lock, flags); if (priv->status & STATUS_HCMD_ACTIVE) { @@ -2223,10 +2224,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) } spin_unlock_irqrestore(&priv->lock, flags); + now = jiffies; + end = now + HOST_COMPLETE_TIMEOUT; +again: rc = wait_event_interruptible_timeout(priv->wait_command_queue, !(priv-> status & STATUS_HCMD_ACTIVE), - HOST_COMPLETE_TIMEOUT); + end - now); + if (rc < 0) { + now = jiffies; + if (time_before(now, end)) + goto again; + rc = 0; + } + if (rc == 0) { spin_lock_irqsave(&priv->lock, flags); if (priv->status & STATUS_HCMD_ACTIVE) { diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c index dcc1552c046..effeabb031b 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945.c @@ -1872,11 +1872,12 @@ static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) struct iwl_priv *priv = container_of(work, struct iwl_priv, _3945.thermal_periodic.work); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || priv->txq == NULL) + goto out; + iwl3945_reg_txpower_periodic(priv); +out: mutex_unlock(&priv->mutex); } diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 421d5c8b8e3..101a2c2fe17 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -2763,7 +2763,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data) container_of(data, struct iwl_priv, alive_start.work); mutex_lock(&priv->mutex); - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || priv->txq == NULL) goto out; iwl3945_alive_start(priv); @@ -2910,14 +2910,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct 
ieee80211_vif *vif) IWL_WARN(priv, "Invalid scan band\n"); return -EIO; } - /* - * If active scaning is requested but a certain channel - * is marked passive, we can do active scanning if we - * detect transmissions. + * If active scaning is requested but a certain channel is marked + * passive, we can do active scanning if we detect transmissions. For + * passive only scanning disable switching to active on any channel. */ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : - IWL_GOOD_CRC_TH_DISABLED; + IWL_GOOD_CRC_TH_NEVER; if (!priv->is_internal_short_scan) { scan->tx_cmd.len = cpu_to_le16( diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index f803fb62f8b..857cf613092 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -2023,6 +2023,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) case IEEE80211_SMPS_STATIC: case IEEE80211_SMPS_DYNAMIC: return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: return active_cnt; default: diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 592b0cfcf71..2aed7a05e2c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -878,6 +878,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || (priv->bt_full_concurrent != full_concurrent)) { priv->bt_full_concurrent = full_concurrent; + priv->last_bt_traffic_load = priv->bt_traffic_load; /* Update uCode's rate table. */ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 39a3c9c235f..272bcdfe53a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -442,6 +442,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&priv->mutex); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + goto out; + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); goto out; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index 0bd722cee5a..5c9999db33b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -477,7 +477,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, sizeof(struct iwl_keyinfo)); priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; - priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index e5dfdc39a92..d2358cfcbe9 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -267,7 +267,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, else last_seq = priv->rx_seq[tid]; - if (last_seq >= new_node->start_win) + if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && + last_seq >= new_node->start_win) new_node->start_win = last_seq + 1; new_node->win_size = win_size; @@ -612,5 +613,5 @@ void 
mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr); - memset(priv->rx_seq, 0, sizeof(priv->rx_seq)); + mwifiex_reset_11n_rx_seq_num(priv); } diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h index f3ca8c8c18f..7576c2ab93b 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.h +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h @@ -37,6 +37,13 @@ #define ADDBA_RSP_STATUS_ACCEPT 0 +#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff + +static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv) +{ + memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq)); +} + int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *, u16 seqNum, u16 tid, u8 *ta, diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index cd89fed206a..677e60b5250 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -1081,6 +1081,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; + adapter->is_suspended = false; mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); } diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 5eab3dc29b1..890841a46c6 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -1102,10 +1102,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, adhoc_join->bss_descriptor.bssid, adhoc_join->bss_descriptor.ssid); - for (i = 0; bss_desc->supported_rates[i] && - i < MWIFIEX_SUPPORTED_RATES; - i++) - ; + for (i = 0; i < MWIFIEX_SUPPORTED_RATES && + bss_desc->supported_rates[i]; i++) + ; rates_size = i; /* Copy Data Rates from the Rates recorded in scan response */ diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index d425dbd91d1..3b1217f094e 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -122,7 +122,6 @@ static int mwifiex_sdio_suspend(struct device *dev) struct sdio_mmc_card *card; struct mwifiex_adapter *adapter; mmc_pm_flag_t pm_flag = 0; - int hs_actived = 0; int i; int ret = 0; @@ -149,12 +148,14 @@ static int mwifiex_sdio_suspend(struct device *dev) adapter = card->adapter; /* Enable the Host Sleep */ - hs_actived = mwifiex_enable_hs(adapter); - if (hs_actived) { - pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); - ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + if (!mwifiex_enable_hs(adapter)) { + dev_err(adapter->dev, "cmd: failed to suspend\n"); + return -EFAULT; } + dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + /* Indicate device suspended */ adapter->is_suspended = true; diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d05907d0503..a677e7b4955 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -100,7 +100,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; - if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { + if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter 
|= @@ -112,20 +112,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", mcast_list->num_multicast_addr); - /* Set multicast addresses to firmware */ - if (old_pkt_filter == priv->curr_pkt_filter) { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } else { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } + /* Send multicast addresses to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); } } } diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 91634daec30..2cdb41ac743 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -406,6 +406,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE; priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE; + mwifiex_reset_11n_rx_seq_num(priv); + atomic_set(&priv->wmm.tx_pkts_queued, 0); atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); } diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c index b33ceb1c066..d895ff972d6 100644 --- a/drivers/net/wireless/p54/p54spi.c +++ b/drivers/net/wireless/p54/p54spi.c @@ -623,19 +623,19 @@ static int __devinit p54spi_probe(struct spi_device *spi) ret = spi_setup(spi); if (ret < 0) { dev_err(&priv->spi->dev, "spi_setup failed"); - goto err_free_common; + goto err_free; } ret = gpio_request(p54spi_gpio_power, "p54spi power"); if (ret < 0) { dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret); - goto err_free_common; + goto err_free; } ret = gpio_request(p54spi_gpio_irq, "p54spi irq"); if (ret < 0) { dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret); - goto err_free_common; + goto err_free_gpio_power; } gpio_direction_output(p54spi_gpio_power, 0); @@ -646,7 +646,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) priv->spi); if (ret < 0) { dev_err(&priv->spi->dev, "request_irq() failed"); - goto err_free_common; + goto err_free_gpio_irq; } irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); @@ -678,6 +678,12 @@ static int __devinit p54spi_probe(struct spi_device *spi) return 0; err_free_common: + free_irq(gpio_to_irq(p54spi_gpio_irq), spi); +err_free_gpio_irq: + gpio_free(p54spi_gpio_irq); +err_free_gpio_power: + gpio_free(p54spi_gpio_power); +err_free: p54_free_common(priv->hw); return ret; } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index a8f3bc740df..99e7e7f2e4d 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -41,11 +41,12 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. 
*/ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ @@ -81,7 +82,9 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ @@ -100,6 +103,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 937f9e8bf05..1493171f55c 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1618,6 +1618,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -1630,6 +1631,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + rt2x00_set_field32(®, GPIOCSR_BIT8, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h index d3a4a68cc43..7564ae992b7 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.h +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -670,6 +670,7 @@ #define GPIOCSR_BIT5 FIELD32(0x00000020) #define GPIOCSR_BIT6 FIELD32(0x00000040) #define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_BIT8 FIELD32(0x00000100) /* * BBPPCSR: BBP Pin control register. diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index d27d7b8ba3b..cdd480f9202 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1936,6 +1936,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. 
@@ -1948,6 +1949,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + rt2x00_set_field32(®, GPIOCSR_DIR0, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 15237c27548..f124a1b736d 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) u16 reg; rt2500usb_register_read(rt2x00dev, MAC_CSR19, ®); - return rt2x00_get_field32(reg, MAC_CSR19_BIT7); + return rt2x00_get_field16(reg, MAC_CSR19_BIT7); } #ifdef CONFIG_RT2X00_LIB_LEDS @@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u16 reg; /* * Allocate eeprom data. @@ -1780,6 +1781,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2500usb_register_read(rt2x00dev, MAC_CSR19, ®); + rt2x00_set_field16(®, MAC_CSR19_BIT8, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h index b493306a7ee..196bd5103e4 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -189,14 +189,15 @@ * MAC_CSR19: GPIO control register. */ #define MAC_CSR19 0x0426 -#define MAC_CSR19_BIT0 FIELD32(0x0001) -#define MAC_CSR19_BIT1 FIELD32(0x0002) -#define MAC_CSR19_BIT2 FIELD32(0x0004) -#define MAC_CSR19_BIT3 FIELD32(0x0008) -#define MAC_CSR19_BIT4 FIELD32(0x0010) -#define MAC_CSR19_BIT5 FIELD32(0x0020) -#define MAC_CSR19_BIT6 FIELD32(0x0040) -#define MAC_CSR19_BIT7 FIELD32(0x0080) +#define MAC_CSR19_BIT0 FIELD16(0x0001) +#define MAC_CSR19_BIT1 FIELD16(0x0002) +#define MAC_CSR19_BIT2 FIELD16(0x0004) +#define MAC_CSR19_BIT3 FIELD16(0x0008) +#define MAC_CSR19_BIT4 FIELD16(0x0010) +#define MAC_CSR19_BIT5 FIELD16(0x0020) +#define MAC_CSR19_BIT6 FIELD16(0x0040) +#define MAC_CSR19_BIT7 FIELD16(0x0080) +#define MAC_CSR19_BIT8 FIELD16(0x0100) /* * MAC_CSR20: LED control register. diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index e6e174caec8..d44ce307e93 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1935,7 +1935,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) /* * Check if temperature compensation is supported. 
*/ - if (tssi_bounds[4] == 0xff) + if (tssi_bounds[4] == 0xff || step == 0xff) return 0; /* diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 55cd3e1f75b..e947d3abbe9 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -426,7 +426,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_ON); u32 reg; unsigned long flags; @@ -448,25 +447,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, } spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); - rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); - rt2x00_set_field32(®, INT_MASK_CSR_RXDELAYINT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TXDELAYINT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RX_DONE, mask); - rt2x00_set_field32(®, INT_MASK_CSR_AC0_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC1_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC2_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_AC3_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_HCCA_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_MGMT_DMA_DONE, 0); - rt2x00_set_field32(®, INT_MASK_CSR_MCU_COMMAND, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RXTX_COHERENT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TBTT, mask); - rt2x00_set_field32(®, INT_MASK_CSR_PRE_TBTT, mask); - rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, mask); - rt2x00_set_field32(®, INT_MASK_CSR_AUTO_WAKEUP, mask); - rt2x00_set_field32(®, INT_MASK_CSR_GPTIMER, 0); - rt2x00_set_field32(®, INT_MASK_CSR_RX_COHERENT, 0); - rt2x00_set_field32(®, INT_MASK_CSR_TX_COHERENT, 0); + reg = 0; + if (state == STATE_RADIO_IRQ_ON) { + rt2x00_set_field32(®, INT_MASK_CSR_RX_DONE, 1); + rt2x00_set_field32(®, INT_MASK_CSR_TBTT, 1); + rt2x00_set_field32(®, INT_MASK_CSR_PRE_TBTT, 1); + rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, 1); + rt2x00_set_field32(®, INT_MASK_CSR_AUTO_WAKEUP, 1); + } rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); @@ -953,6 +941,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -965,6 +954,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 6e7fe941b95..9366ef03e67 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -600,6 +600,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -612,6 +613,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. 
+ */ + rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + /* * Initialize hw specifications. */ @@ -819,13 +828,17 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x050d, 0x8053) }, { USB_DEVICE(0x050d, 0x805c) }, { USB_DEVICE(0x050d, 0x815c) }, + { USB_DEVICE(0x050d, 0x825a) }, { USB_DEVICE(0x050d, 0x825b) }, { USB_DEVICE(0x050d, 0x935a) }, { USB_DEVICE(0x050d, 0x935b) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00e8) }, + { USB_DEVICE(0x0411, 0x0158) }, + { USB_DEVICE(0x0411, 0x015d) }, { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, + { USB_DEVICE(0x0411, 0x01ee) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002f) }, { USB_DEVICE(0x07aa, 0x003c) }, @@ -838,13 +851,17 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c0e) }, { USB_DEVICE(0x07d1, 0x3c0f) }, { USB_DEVICE(0x07d1, 0x3c11) }, + { USB_DEVICE(0x07d1, 0x3c13) }, + { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c16) }, + { USB_DEVICE(0x2001, 0x3c1b) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, /* Edimax */ { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, { USB_DEVICE(0x7392, 0x7718) }, + { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x1480) }, { USB_DEVICE(0x203d, 0x14a9) }, @@ -878,6 +895,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13b1, 0x0031) }, { USB_DEVICE(0x1737, 0x0070) }, { USB_DEVICE(0x1737, 0x0071) }, + { USB_DEVICE(0x1737, 0x0077) }, + { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0162) }, { USB_DEVICE(0x0789, 0x0163) }, @@ -901,9 +920,13 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871b) }, { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3071) }, + { USB_DEVICE(0x1b75, 0x3072) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ + { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x000c) }, { USB_DEVICE(0x1d4d, 0x000e) }, { USB_DEVICE(0x1d4d, 0x0011) }, @@ -941,6 +964,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0df6, 0x0048) }, { USB_DEVICE(0x0df6, 0x0051) }, { USB_DEVICE(0x0df6, 0x005f) }, + { USB_DEVICE(0x0df6, 0x0060) }, /* SMC */ { USB_DEVICE(0x083a, 0x6618) }, { USB_DEVICE(0x083a, 0x7511) }, @@ -955,7 +979,9 @@ static struct usb_device_id rt2800usb_device_table[] = { /* Sparklan */ { USB_DEVICE(0x15a9, 0x0006) }, /* Sweex */ + { USB_DEVICE(0x177f, 0x0153) }, { USB_DEVICE(0x177f, 0x0302) }, + { USB_DEVICE(0x177f, 0x0313) }, /* U-Media */ { USB_DEVICE(0x157e, 0x300e) }, { USB_DEVICE(0x157e, 0x3013) }, @@ -973,6 +999,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0586, 0x341e) }, { USB_DEVICE(0x0586, 0x343e) }, #ifdef CONFIG_RT2800USB_RT33XX + /* Belkin */ + { USB_DEVICE(0x050d, 0x945b) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3370) }, { USB_DEVICE(0x148f, 0x8070) }, @@ -997,6 +1025,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x148f, 0x3572) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0041) }, + { USB_DEVICE(0x0df6, 0x0062) }, /* Toshiba */ { USB_DEVICE(0x0930, 0x0a07) }, /* Zinwell */ @@ -1036,27 +1065,24 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13d3, 0x3322) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1003) }, - { USB_DEVICE(0x050d, 0x825a) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x012e) }, { USB_DEVICE(0x0411, 
0x0148) }, { USB_DEVICE(0x0411, 0x0150) }, - { USB_DEVICE(0x0411, 0x015d) }, /* Corega */ { USB_DEVICE(0x07aa, 0x0041) }, { USB_DEVICE(0x07aa, 0x0042) }, { USB_DEVICE(0x18c5, 0x0008) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c0b) }, - { USB_DEVICE(0x07d1, 0x3c13) }, - { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3c17) }, /* Edimax */ { USB_DEVICE(0x7392, 0x4085) }, - { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, + /* Fujitsu Stylistic 550 */ + { USB_DEVICE(0x1690, 0x0761) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ @@ -1068,20 +1094,13 @@ static struct usb_device_id rt2800usb_device_table[] = { /* LevelOne */ { USB_DEVICE(0x1740, 0x0605) }, { USB_DEVICE(0x1740, 0x0615) }, - /* Linksys */ - { USB_DEVICE(0x1737, 0x0077) }, - { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0168) }, { USB_DEVICE(0x0789, 0x0169) }, /* Motorola */ { USB_DEVICE(0x100d, 0x9032) }, - /* Ovislink */ - { USB_DEVICE(0x1b75, 0x3071) }, - { USB_DEVICE(0x1b75, 0x3072) }, /* Pegatron */ { USB_DEVICE(0x05a6, 0x0101) }, - { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x0010) }, /* Planex */ { USB_DEVICE(0x2019, 0x5201) }, @@ -1095,16 +1114,11 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0df6, 0x004a) }, { USB_DEVICE(0x0df6, 0x004d) }, { USB_DEVICE(0x0df6, 0x0053) }, - { USB_DEVICE(0x0df6, 0x0060) }, - { USB_DEVICE(0x0df6, 0x0062) }, /* SMC */ { USB_DEVICE(0x083a, 0xa512) }, { USB_DEVICE(0x083a, 0xc522) }, { USB_DEVICE(0x083a, 0xd522) }, { USB_DEVICE(0x083a, 0xf511) }, - /* Sweex */ - { USB_DEVICE(0x177f, 0x0153) }, - { USB_DEVICE(0x177f, 0x0313) }, /* Zyxel */ { USB_DEVICE(0x0586, 0x341a) }, #endif diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index dffaa8f45f1..031aa2b1bb4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -410,10 +410,14 @@ void rt2x00lib_txdone(struct queue_entry *entry, /* * If the data queue was below the threshold before the txdone * handler we must make sure the packet queue in the mac80211 stack - * is reenabled when the txdone handler has finished. + * is reenabled when the txdone handler has finished. This has to be + * serialized with rt2x00mac_tx(), otherwise we can wake up queue + * before it was stopped. */ + spin_lock_bh(&entry->queue->tx_lock); if (!rt2x00queue_threshold(entry->queue)) rt2x00queue_unpause_queue(entry->queue); + spin_unlock_bh(&entry->queue->tx_lock); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone); @@ -1109,7 +1113,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) rt2x00dev->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | +#endif BIT(NL80211_IFTYPE_WDS); /* diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index a76fdbe6d7d..bc159bed4f6 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -152,13 +152,22 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) goto exit_fail; + /* + * Pausing queue has to be serialized with rt2x00lib_txdone(). Note + * we should not use spin_lock_bh variant as bottom halve was already + * disabled before ieee80211_xmit() call. 
+ */ + spin_lock(&queue->tx_lock); if (rt2x00queue_threshold(queue)) rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); return; exit_fail: + spin_lock(&queue->tx_lock); rt2x00queue_pause_queue(queue); + spin_unlock(&queue->tx_lock); exit_free_skb: dev_kfree_skb_any(skb); } diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 17148bb2442..10fe07d96ea 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, udelay(REGISTER_BUSY_DELAY); } - ERROR(rt2x00dev, "Indirect register access failed: " - "offset=0x%.08x, value=0x%.08x\n", offset, *reg); + printk_once(KERN_ERR "%s() Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); *reg = ~0; return 0; diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 2886d250de5..76f26ad044a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -562,6 +562,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, u8 rate_idx, rate_flags; int ret = 0; + /* + * That function must be called with bh disabled. + */ spin_lock(&queue->tx_lock); entry = rt2x00queue_get_entry(queue, Q_INDEX); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 54f0b1345fc..99fa4161440 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue) case QID_RX: if (!rt2x00queue_full(queue)) rt2x00queue_for_each_entry(queue, - Q_INDEX_DONE, Q_INDEX, + Q_INDEX_DONE, NULL, rt2x00usb_kick_rx_entry); break; diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 9d35ec16a3a..17de24eb686 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2254,8 +2254,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { - struct ieee80211_conf conf = { .flags = 0 }; - struct rt2x00lib_conf libconf = { .conf = &conf }; + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } @@ -2841,6 +2840,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Disable power saving. @@ -2858,6 +2858,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + rt2x00_set_field32(®, MAC_CSR13_BIT13, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h index e3cd6db76b0..8f3da5a5676 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) /* * MAC_CSR14: LED control register. 
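/*
 * Illustrative sketch (not part of the patch): the queue->tx_lock changes in
 * rt2x00mac.c and rt2x00dev.c above serialize pausing and unpausing of a TX
 * queue. The TX path already runs with bottom halves disabled, so it takes
 * the plain spin_lock(); the TX-done path may run with bottom halves enabled
 * and therefore uses the _bh variant. The "example_" function names are made
 * up purely to show how the two sides pair up.
 */
static void example_tx_path_pause(struct data_queue *queue)
{
	/* Mirrors rt2x00mac_tx(): pause once the queue crosses the threshold. */
	spin_lock(&queue->tx_lock);
	if (rt2x00queue_threshold(queue))
		rt2x00queue_pause_queue(queue);
	spin_unlock(&queue->tx_lock);
}

static void example_txdone_unpause(struct queue_entry *entry)
{
	/* Mirrors rt2x00lib_txdone(): unpause under the same lock. */
	spin_lock_bh(&entry->queue->tx_lock);
	if (!rt2x00queue_threshold(entry->queue))
		rt2x00queue_unpause_queue(entry->queue);
	spin_unlock_bh(&entry->queue->tx_lock);
}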
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index ad20953cbf0..1a06231cfc7 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -2189,6 +2190,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00usb_register_read(rt2x00dev, MAC_CSR13, ®); + rt2x00_set_field32(®, MAC_CSR13_BIT15, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); + /* * Initialize hw specifications. */ diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h index 9f6b470414d..df1cc116b83 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) +#define MAC_CSR13_BIT14 FIELD32(0x00004000) +#define MAC_CSR13_BIT15 FIELD32(0x00008000) /* * MAC_CSR14: LED control register. diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 1e0be14d10d..bf01d218d37 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -43,7 +43,7 @@ MODULE_AUTHOR("Larry Finger "); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c index 2e0de2f5f0f..c2d5b495c17 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/leds.c +++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c @@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev, radio_on = true; } else if (radio_on) { radio_on = false; - cancel_delayed_work_sync(&priv->led_on); + cancel_delayed_work(&priv->led_on); ieee80211_queue_delayed_work(hw, &priv->led_off, 0); } } else if (radio_on) { diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index c872a232427..c29f3980c1f 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -954,8 +954,13 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); - if (pskb) + if (pskb) { + struct rtl_tx_desc *entry = &ring->desc[ring->idx]; + pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc( + (u8 *) entry, true, HW_DESC_TXBUFF_ADDR), + pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); + } /*NB: the beacon data buffer must be 32-bit aligned. 
*/ pskb = ieee80211_beacon_get(hw, mac->vif); @@ -1180,10 +1185,12 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, ring->idx = (ring->idx + 1) % ring->entries; } - pci_free_consistent(rtlpci->pdev, - sizeof(*ring->desc) * ring->entries, - ring->desc, ring->dma); - ring->desc = NULL; + if (ring->desc) { + pci_free_consistent(rtlpci->pdev, + sizeof(*ring->desc) * ring->entries, + ring->desc, ring->dma); + ring->desc = NULL; + } } static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci) @@ -1207,12 +1214,14 @@ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci) kfree_skb(skb); } - pci_free_consistent(rtlpci->pdev, + if (rtlpci->rx_ring[rx_queue_idx].desc) { + pci_free_consistent(rtlpci->pdev, sizeof(*rtlpci->rx_ring[rx_queue_idx]. desc) * rtlpci->rxringcount, rtlpci->rx_ring[rx_queue_idx].desc, rtlpci->rx_ring[rx_queue_idx].dma); - rtlpci->rx_ring[rx_queue_idx].desc = NULL; + rtlpci->rx_ring[rx_queue_idx].desc = NULL; + } } } @@ -1979,6 +1988,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) rtl_deinit_deferred_work(hw); rtlpriv->intf_ops->adapter_stop(hw); } + rtlpriv->cfg->ops->disable_interrupt(hw); /*deinit rfkill */ rtl_deinit_rfkill(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 97183829b9b..2d33d7d8403 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -523,6 +523,10 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw) dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, dm_digtable.backoff_val)); + dm_digtable.cur_igvalue += 2; + if (dm_digtable.cur_igvalue > 0x3f) + dm_digtable.cur_igvalue = 0x3f; + if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, dm_digtable.cur_igvalue); @@ -1218,13 +1222,18 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) ("PreState = %d, CurState = %d\n", p_ra->pre_ratr_state, p_ra->ratr_state)); - rcu_read_lock(); - sta = ieee80211_find_sta(mac->vif, mac->bssid); + /* Only the PCI card uses sta in the update rate table + * callback routine */ + if (rtlhal->interface == INTF_PCI) { + rcu_read_lock(); + sta = ieee80211_find_sta(mac->vif, mac->bssid); + } rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state); p_ra->pre_ratr_state = p_ra->ratr_state; - rcu_read_unlock(); + if (rtlhal->interface == INTF_PCI) + rcu_read_unlock(); } } } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 942f7a3969a..354f9b15934 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -295,6 +295,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { /*=== Customer ID ===*/ /****** 8188CU ********/ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c index 3b5af0113d7..0c77a14a382 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c @@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, /* Allocate skb buffer to contain firmware */ /* info and 
tx descriptor info. */ skb = dev_alloc_skb(frag_length); + if (!skb) + return false; skb_reserve(skb, extra_descoffset); seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length - extra_descoffset)); @@ -575,6 +577,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd, len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len); skb = dev_alloc_skb(len); + if (!skb) + return false; cb_desc = (struct rtl_tcb_desc *)(skb->cb); cb_desc->queue_index = TXCMD_QUEUE; cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL; diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index e4272b97829..dbcf5a9cff9 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) WARN_ON(skb_queue_empty(&rx_queue)); while (!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); - _rtl_usb_rx_process_agg(hw, skb); - ieee80211_rx_irqsafe(hw, skb); + _rtl_usb_rx_process_agg(hw, _skb); + ieee80211_rx_irqsafe(hw, _skb); } } diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index a14a48c99cd..de9210c102e 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c @@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) cancel_work_sync(&wl->irq_work); cancel_work_sync(&wl->tx_work); cancel_work_sync(&wl->filter_work); + cancel_delayed_work_sync(&wl->elp_work); mutex_lock(&wl->mutex); diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c index f51a0241a44..4cf5c2e201d 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/wl1251/sdio.c @@ -259,6 +259,7 @@ static int wl1251_sdio_probe(struct sdio_func *func, } if (wl->irq) { + irq_set_status_flags(wl->irq, IRQ_NOAUTOEN); ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl); if (ret < 0) { wl1251_error("request_irq() failed: %d", ret); @@ -266,7 +267,6 @@ static int wl1251_sdio_probe(struct sdio_func *func, } irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); - disable_irq(wl->irq); wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; @@ -314,8 +314,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func) if (wl->irq) free_irq(wl->irq, wl); - kfree(wl_sdio); wl1251_free_hw(wl); + kfree(wl_sdio); sdio_claim_host(func); sdio_release_irq(func); diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c index af6448c4d3e..49f3651423d 100644 --- a/drivers/net/wireless/wl1251/spi.c +++ b/drivers/net/wireless/wl1251/spi.c @@ -280,6 +280,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi) wl->use_eeprom = pdata->use_eeprom; + irq_set_status_flags(wl->irq, IRQ_NOAUTOEN); ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); if (ret < 0) { wl1251_error("request_irq() failed: %d", ret); @@ -288,8 +289,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi) irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); - disable_irq(wl->irq); - ret = wl1251_init_ieee80211(wl); if (ret) goto out_irq; diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c index b07f8b7e5f1..e0e16888fe8 100644 --- a/drivers/net/wireless/wl12xx/boot.c +++ b/drivers/net/wireless/wl12xx/boot.c @@ -328,6 +328,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 3; for (i = 0; i < burst_len; i++) { + if (nvs_ptr + 3 >= (u8 *) wl->nvs + 
nvs_len) + goto out_badnvs; + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); @@ -339,6 +342,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 4; dest_addr += 4; } + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; } /* @@ -350,6 +356,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) */ nvs_ptr = (u8 *)wl->nvs + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + nvs_len -= nvs_ptr - (u8 *)wl->nvs; /* Now we must set the partition correctly */ @@ -365,6 +375,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) kfree(nvs_aligned); return 0; + +out_badnvs: + wl1271_error("nvs data is malformed"); + return -EILSEQ; } static void wl1271_boot_enable_interrupts(struct wl1271 *wl) diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c index 42935ac7266..b8ec8cd69b0 100644 --- a/drivers/net/wireless/wl12xx/cmd.c +++ b/drivers/net/wireless/wl12xx/cmd.c @@ -121,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from INI out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -144,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -163,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from ini out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -187,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 161f207786a..810a47290ab 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -152,6 +152,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); /* Notify xenvif that ring now has space to send an skb to the frontend */ void xenvif_notify_tx_completion(struct xenvif *vif); +/* Prevent the device from generating any further traffic. 
*/ +void xenvif_carrier_off(struct xenvif *vif); + /* Returns number of ring slots required to send an skb to the frontend */ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 182562952c7..8eaf0e2aaf2 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif) static void xenvif_down(struct xenvif *vif) { disable_irq(vif->irq); + del_timer_sync(&vif->credit_timeout); xen_netbk_deschedule_xenvif(vif); xen_netbk_remove_xenvif(vif); } @@ -342,23 +343,26 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, return err; } -void xenvif_disconnect(struct xenvif *vif) +void xenvif_carrier_off(struct xenvif *vif) { struct net_device *dev = vif->dev; - if (netif_carrier_ok(dev)) { - rtnl_lock(); - netif_carrier_off(dev); /* discard queued packets */ - if (netif_running(dev)) - xenvif_down(vif); - rtnl_unlock(); - xenvif_put(vif); - } + + rtnl_lock(); + netif_carrier_off(dev); /* discard queued packets */ + if (netif_running(dev)) + xenvif_down(vif); + rtnl_unlock(); + xenvif_put(vif); +} + +void xenvif_disconnect(struct xenvif *vif) +{ + if (netif_carrier_ok(vif->dev)) + xenvif_carrier_off(vif); atomic_dec(&vif->refcnt); wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); - del_timer_sync(&vif->credit_timeout); - if (vif->irq) unbind_from_irqhandler(vif->irq, vif); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 0e4851b8a77..1260bf0d7e0 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -143,7 +143,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif) atomic_dec(&netbk->netfront_count); } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); @@ -838,7 +839,7 @@ static void netbk_tx_err(struct xenvif *vif, do { make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - if (cons >= end) + if (cons == end) break; txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); @@ -847,6 +848,13 @@ static void netbk_tx_err(struct xenvif *vif, xenvif_put(vif); } +static void netbk_fatal_tx_err(struct xenvif *vif) +{ + netdev_err(vif->dev, "fatal error; disabling device\n"); + xenvif_carrier_off(vif); + xenvif_put(vif); +} + static int netbk_count_requests(struct xenvif *vif, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, @@ -860,29 +868,33 @@ static int netbk_count_requests(struct xenvif *vif, do { if (frags >= work_to_do) { - netdev_dbg(vif->dev, "Need more frags\n"); - return -frags; + netdev_err(vif->dev, "Need more frags\n"); + netbk_fatal_tx_err(vif); + return -ENODATA; } if (unlikely(frags >= MAX_SKB_FRAGS)) { - netdev_dbg(vif->dev, "Too many frags\n"); - return -frags; + netdev_err(vif->dev, "Too many frags\n"); + netbk_fatal_tx_err(vif); + return -E2BIG; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), sizeof(*txp)); if (txp->size > first->size) { - netdev_dbg(vif->dev, "Frags galore\n"); - return -frags; + netdev_err(vif->dev, "Frag is bigger than frame.\n"); + netbk_fatal_tx_err(vif); + return -EIO; } first->size -= txp->size; frags++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", + 
netdev_err(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); - return -frags; + netbk_fatal_tx_err(vif); + return -EINVAL; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; @@ -925,7 +937,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, pending_idx = netbk->pending_ring[index]; page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) - return NULL; + goto err; netbk->mmap_pages[pending_idx] = page; @@ -949,6 +961,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, } return gop; +err: + /* Unwind, freeing all pages and sending error responses. */ + while (i-- > start) { + xen_netbk_idx_release(netbk, (unsigned long)shinfo->frags[i].page, + XEN_NETIF_RSP_ERROR); + } + /* The head too, if necessary. */ + if (start) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + + return NULL; } static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, @@ -957,30 +980,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, { struct gnttab_copy *gop = *gopp; int pending_idx = *((u16 *)skb->data); - struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; - struct xenvif *vif = pending_tx_info[pending_idx].vif; - struct xen_netif_tx_request *txp; struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i, err, start; /* Check status of header. */ err = gop->status; - if (unlikely(err)) { - pending_ring_idx_t index; - index = pending_index(netbk->pending_prod++); - txp = &pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); - } + if (unlikely(err)) + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Skip first skb fragment if it is on same page as header fragment. */ start = ((unsigned long)shinfo->frags[0].page == pending_idx); for (i = start; i < nr_frags; i++) { int j, newerr; - pending_ring_idx_t index; pending_idx = (unsigned long)shinfo->frags[i].page; @@ -989,16 +1002,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); continue; } /* Error on this fragment: respond to client with an error. */ - txp = &netbk->pending_tx_info[pending_idx].req; - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); - index = pending_index(netbk->pending_prod++); - netbk->pending_ring[index] = pending_idx; - xenvif_put(vif); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) @@ -1006,10 +1015,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); for (j = start; j < i; j++) { pending_idx = (unsigned long)shinfo->frags[i].page; - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } /* Remember the error: invalidate all subsequent fragments. 
*/ @@ -1044,7 +1053,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) /* Take an extra reference to offset xen_netbk_idx_release */ get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } } @@ -1057,7 +1066,8 @@ static int xen_netbk_get_extras(struct xenvif *vif, do { if (unlikely(work_to_do-- <= 0)) { - netdev_dbg(vif->dev, "Missing extra info\n"); + netdev_err(vif->dev, "Missing extra info\n"); + netbk_fatal_tx_err(vif); return -EBADR; } @@ -1066,8 +1076,9 @@ static int xen_netbk_get_extras(struct xenvif *vif, if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { vif->tx.req_cons = ++cons; - netdev_dbg(vif->dev, + netdev_err(vif->dev, "Invalid extra type: %d\n", extra.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1083,13 +1094,15 @@ static int netbk_set_skb_gso(struct xenvif *vif, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { - netdev_dbg(vif->dev, "GSO size must not be zero.\n"); + netdev_err(vif->dev, "GSO size must not be zero.\n"); + netbk_fatal_tx_err(vif); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { - netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + netbk_fatal_tx_err(vif); return -EINVAL; } @@ -1226,9 +1239,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); + /* This can sometimes happen because the test of + * list_empty(net_schedule_list) at the top of the + * loop is unlocked. Just go back and have another + * look. + */ if (!vif) continue; + if (vif->tx.sring->req_prod - vif->tx.req_cons > + XEN_NETIF_TX_RING_SIZE) { + netdev_err(vif->dev, + "Impossible number of requests. " + "req_prod %d, req_cons %d, size %ld\n", + vif->tx.sring->req_prod, vif->tx.req_cons, + XEN_NETIF_TX_RING_SIZE); + netbk_fatal_tx_err(vif); + continue; + } + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); @@ -1256,17 +1285,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; - if (unlikely(work_to_do < 0)) { - netbk_tx_err(vif, &txreq, idx); + if (unlikely(work_to_do < 0)) continue; - } } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); - if (unlikely(ret < 0)) { - netbk_tx_err(vif, &txreq, idx - ret); + if (unlikely(ret < 0)) continue; - } + idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { @@ -1278,11 +1304,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { - netdev_dbg(vif->dev, + netdev_err(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); - netbk_tx_err(vif, &txreq, idx); + netbk_fatal_tx_err(vif); continue; } @@ -1310,8 +1336,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { + /* Failure in netbk_set_skb_gso is fatal. 
*/ kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); continue; } } @@ -1412,7 +1438,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk) txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx); + xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1467,7 +1493,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk) } -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, + u8 status) { struct xenvif *vif; struct pending_tx_info *pending_tx_info; @@ -1481,7 +1508,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) vif = pending_tx_info->vif; - make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); + make_tx_response(vif, &pending_tx_info->req, status); index = pending_index(netbk->pending_prod++); netbk->pending_ring[index] = pending_idx; diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c index 9046f7b2ed7..137406ca73f 100644 --- a/drivers/oprofile/oprofile_perf.c +++ b/drivers/oprofile/oprofile_perf.c @@ -25,7 +25,7 @@ static int oprofile_perf_enabled; static DEFINE_MUTEX(oprofile_perf_mutex); static struct op_counter_config *counter_config; -static struct perf_event **perf_events[nr_cpumask_bits]; +static struct perf_event **perf_events[NR_CPUS]; static int num_counters; /* diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a70fa89f76f..7bd36947deb 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -131,7 +131,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle)) return AE_OK; - acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); + if (ACPI_FAILURE(status)) { + warn("can't evaluate _ADR (%#x)\n", status); + return AE_OK; + } + device = (adr >> 16) & 0xffff; function = adr & 0xffff; diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 0ec8930f31b..ae762ecc658 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -1793,10 +1793,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, if (!pte) return -ENOMEM; /* It is large page*/ - if (largepage_lvl > 1) + if (largepage_lvl > 1) { pteval |= DMA_PTE_LARGE_PAGE; - else + /* Ensure that old small page tables are removed to make room + for superpage, if they exist. */ + dma_pte_clear_range(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + dma_pte_free_pagetable(domain, iov_pfn, + iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); + } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; + } } /* We don't need lock here, nobody else @@ -2282,8 +2289,39 @@ static int domain_add_dev_info(struct dmar_domain *domain, return 0; } +static bool device_has_rmrr(struct pci_dev *dev) +{ + struct dmar_rmrr_unit *rmrr; + int i; + + for_each_rmrr_units(rmrr) { + for (i = 0; i < rmrr->devices_cnt; i++) { + /* + * Return TRUE if this RMRR contains the device that + * is passed in. + */ + if (rmrr->devices[i] == dev) + return true; + } + } + return false; +} + static int iommu_should_identity_map(struct pci_dev *pdev, int startup) { + + /* + * We want to prevent any device associated with an RMRR from + * getting placed into the SI Domain. 
This is done because + * problems exist when devices are moved in and out of domains + * and their respective RMRR info is lost. We exempt USB devices + * from this process due to their usage of RMRRs that are known + * to not be needed after BIOS hand-off to OS. + */ + if (device_has_rmrr(pdev) && + (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) + return 0; + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return 1; @@ -3573,6 +3611,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, found = 1; } + spin_unlock_irqrestore(&device_domain_lock, flags); + if (found == 0) { unsigned long tmp_flags; spin_lock_irqsave(&domain->iommu_lock, tmp_flags); @@ -3589,8 +3629,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, spin_unlock_irqrestore(&iommu->lock, tmp_flags); } } - - spin_unlock_irqrestore(&device_domain_lock, flags); } static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 2f10328bf66..e1749825008 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -869,5 +869,15 @@ EXPORT_SYMBOL(pci_msi_enabled); void pci_msi_init_pci_dev(struct pci_dev *dev) { + int pos; INIT_LIST_HEAD(&dev->msi_list); + + /* Disable the msi hardware to avoid screaming interrupts + * during boot. This is the power on reset default so + * usually this should be a noop. + */ + pos = pci_find_capability(dev, PCI_CAP_ID_MSI); + if (pos) + msi_set_enable(dev, pos, 0); + msix_set_enable(dev, 0); } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index d36f41ea8cb..56b04bc80a1 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -393,7 +393,6 @@ static int __init acpi_pci_init(void) if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); - pcie_clear_aspm(); pcie_no_aspm(); } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 46767c53917..5d5bdf7ecf5 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -726,6 +726,18 @@ static int pci_pm_suspend_noirq(struct device *dev) pci_pm_set_unknown_state(pci_dev); + /* + * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's + * PCI COMMAND register isn't 0, the BIOS assumes that the controller + * hasn't been quiesced and tries to turn it off. If the controller + * is already in D3, this can hang or cause memory corruption. + * + * Since the value of the COMMAND register doesn't matter once the + * device has been suspended, we can safely set it to 0 here. + */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } @@ -924,6 +936,13 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); + /* + * The reason for doing this here is the same as for the analogous code + * in pci_pm_suspend_noirq(). 
+ */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 6892601fc76..9b9305ae93a 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -68,7 +68,7 @@ struct pcie_link_state { struct aspm_latency acceptable[8]; }; -static int aspm_disabled, aspm_force, aspm_clear_state; +static int aspm_disabled, aspm_force; static bool aspm_support_enabled = true; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) int pos; u32 reg32; - if (aspm_clear_state) - return -EINVAL; - /* * Some functions in a slot might not all be PCIe functions, * very strange. Disable ASPM for the whole slot @@ -511,6 +508,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) pos = pci_pcie_cap(child); if (!pos) return -EINVAL; + + /* + * If ASPM is disabled then we're not going to change + * the BIOS state. It's safe to continue even if it's a + * pre-1.1 device + */ + + if (aspm_disabled) + continue; + /* * Disable ASPM for pre-1.1 PCIe device, we follow MS to use * RBER bit to determine if a function is 1.1 version device @@ -574,9 +581,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) return; - if (aspm_disabled && !aspm_clear_state) - return; - /* VIA has a strange chipset, root port is under a bridge */ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT && pdev->bus->self) @@ -608,7 +612,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev) * the BIOS's expectation, we'll do so once pci_enable_device() is * called. */ - if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) { + if (aspm_policy != POLICY_POWERSAVE) { pcie_config_aspm_path(link); pcie_set_clkpm(link, policy_to_clkpm_state(link)); } @@ -649,8 +653,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link, *root, *parent_link; - if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) || - !parent || !parent->link_state) + if (!pci_is_pcie(pdev) || !parent || !parent->link_state) return; if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) @@ -734,13 +737,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev) * pci_disable_link_state - disable pci device's link state, so the link will * never enter specific states */ -static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) +static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem, + bool force) { struct pci_dev *parent = pdev->bus->self; struct pcie_link_state *link; - if (aspm_disabled || !pci_is_pcie(pdev)) + if (aspm_disabled && !force) + return; + + if (!pci_is_pcie(pdev)) return; + if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) parent = pdev; @@ -768,16 +776,34 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) void pci_disable_link_state_locked(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, false); + __pci_disable_link_state(pdev, state, false, false); } EXPORT_SYMBOL(pci_disable_link_state_locked); void pci_disable_link_state(struct pci_dev *pdev, int state) { - __pci_disable_link_state(pdev, state, true); + __pci_disable_link_state(pdev, state, true, false); } EXPORT_SYMBOL(pci_disable_link_state); +void pcie_clear_aspm(struct 
pci_bus *bus) +{ + struct pci_dev *child; + + if (aspm_force) + return; + + /* + * Clear any ASPM setup that the firmware has carried out on this bus + */ + list_for_each_entry(child, &bus->devices, bus_list) { + __pci_disable_link_state(child, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM, + false, true); + } +} + static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) { int i; @@ -935,6 +961,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) static int __init pcie_aspm_disable(char *str) { if (!strcmp(str, "off")) { + aspm_policy = POLICY_DEFAULT; aspm_disabled = 1; aspm_support_enabled = false; printk(KERN_INFO "PCIe ASPM is disabled\n"); @@ -947,16 +974,18 @@ static int __init pcie_aspm_disable(char *str) __setup("pcie_aspm=", pcie_aspm_disable); -void pcie_clear_aspm(void) -{ - if (!aspm_force) - aspm_clear_state = 1; -} - void pcie_no_aspm(void) { - if (!aspm_force) + /* + * Disabling ASPM is intended to prevent the kernel from modifying + * existing hardware state, not to clear existing state. To that end: + * (a) set policy to POLICY_DEFAULT in order to avoid changing state + * (b) prevent userspace from changing policy + */ + if (!aspm_force) { + aspm_policy = POLICY_DEFAULT; aspm_disabled = 1; + } } /** diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index bafb3c3d4a8..0d5d0bfcb66 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -657,10 +657,17 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", secondary, subordinate, pass); + if (!primary && (primary != bus->number) && secondary && subordinate) { + dev_warn(&dev->dev, "Primary bus is hard wired to 0\n"); + primary = bus->number; + } + /* Check if setup is sensible at all */ if (!pass && - (primary != bus->number || secondary <= bus->number)) { - dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); + (primary != bus->number || secondary <= bus->number || + secondary > subordinate)) { + dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", + secondary, subordinate); broken = 1; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index cec46292731..a9b12497676 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2746,7 +2746,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) if (PCI_FUNC(dev->devfn)) return; /* - * RICOH 0xe823 SD/MMC card reader fails to recognize + * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base * clock frequency from 200Mhz to 50Mhz fixes this issue. 
* @@ -2757,7 +2757,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ - if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { + if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || + dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); @@ -2784,6 +2785,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ @@ -2822,6 +2825,40 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class); +/* + * Some BIOS implementations leave the Intel GPU interrupts enabled, + * even though no one is handling them (f.e. i915 driver is never loaded). + * Additionally the interrupt destination is not set up properly + * and the interrupt ends up -somewhere-. + * + * These spurious interrupts are "sticky" and the kernel disables + * the (shared) interrupt line after 100.000+ generated interrupts. + * + * Fix it by disabling the still enabled interrupts. + * This resolves crashes often seen on monitor unplug. 
+ */ +#define I915_DEIER_REG 0x4400c +static void __devinit disable_igfx_irq(struct pci_dev *dev) +{ + void __iomem *regs = pci_iomap(dev, 0, 0); + if (regs == NULL) { + dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n"); + return; + } + + /* Check if any interrupt line is still enabled */ + if (readl(regs + I915_DEIER_REG) != 0) { + dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; " + "disabling\n"); + + writel(0, regs + I915_DEIER_REG); + } + + pci_iounmap(dev, regs); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) { diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 7f87beed35a..f53da9eda76 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev) static void pci_stop_dev(struct pci_dev *dev) { + pci_pme_active(dev, false); + if (dev->is_added) { pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 9995842e45b..49e03234c72 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -34,6 +34,7 @@ struct resource_list_x { resource_size_t start; resource_size_t end; resource_size_t add_size; + resource_size_t min_align; unsigned long flags; }; @@ -65,7 +66,7 @@ void pci_realloc(void) */ static void add_to_list(struct resource_list_x *head, struct pci_dev *dev, struct resource *res, - resource_size_t add_size) + resource_size_t add_size, resource_size_t min_align) { struct resource_list_x *list = head; struct resource_list_x *ln = list->next; @@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head, tmp->end = res->end; tmp->flags = res->flags; tmp->add_size = add_size; + tmp->min_align = min_align; list->next = tmp; } static void add_to_failed_list(struct resource_list_x *head, struct pci_dev *dev, struct resource *res) { - add_to_list(head, dev, res, 0); + add_to_list(head, dev, res, + 0 /* dont care */, + 0 /* dont care */); } static void __dev_sort_resources(struct pci_dev *dev, @@ -159,13 +163,16 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, idx = res - &list->dev->resource[0]; add_size=list->add_size; - if (!resource_size(res) && add_size) { - res->end = res->start + add_size - 1; - if(pci_assign_resource(list->dev, idx)) + if (!resource_size(res)) { + res->end = res->start + add_size - 1; + if(pci_assign_resource(list->dev, idx)) reset_resource(res); - } else if (add_size) { - adjust_resource(res, res->start, - resource_size(res) + add_size); + } else { + resource_size_t align = list->min_align; + res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); + if (pci_reassign_resource(list->dev, idx, add_size, align)) + dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n", + res); } out: tmp = list; @@ -543,6 +550,20 @@ static resource_size_t calculate_memsize(resource_size_t size, return size; } +static resource_size_t get_res_add_size(struct resource_list_x *add_head, + struct resource *res) +{ + struct resource_list_x *list; + + /* check if it is in add_head list */ + for (list = add_head->next; list && list->res != res; + list = list->next); + if (list) + return list->add_size; + + return 0; +} + /** * pbus_size_io() - size the io window of a given bus * @@ -562,6 +583,7 @@ static void pbus_size_io(struct 
pci_bus *bus, resource_size_t min_size, struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); unsigned long size = 0, size0 = 0, size1 = 0; + resource_size_t children_add_size = 0; if (!b_res) return; @@ -582,12 +604,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, size += r_size; else size1 += r_size; + + if (add_head) + children_add_size += get_res_add_size(add_head, r); } } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); + if (children_add_size > add_size) + add_size = children_add_size; size1 = (!add_head || (add_head && !add_size)) ? size0 : - calculate_iosize(size, min_size+add_size, size1, + calculate_iosize(size, min_size, add_size + size1, resource_size(b_res), 4096); if (!size0 && !size1) { if (b_res->start || b_res->end) @@ -602,7 +629,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0); + add_to_list(add_head, bus->self, b_res, size1-size0, 4096); } /** @@ -627,6 +654,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, int order, max_order; struct resource *b_res = find_free_bus_resource(bus, type); unsigned int mem64_mask = 0; + resource_size_t children_add_size = 0; if (!b_res) return 0; @@ -668,6 +696,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (order > max_order) max_order = order; mem64_mask &= r->flags & IORESOURCE_MEM_64; + + if (add_head) + children_add_size += get_res_add_size(add_head, r); } } align = 0; @@ -684,8 +715,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); + if (children_add_size > add_size) + add_size = children_add_size; size1 = (!add_head || (add_head && !add_size)) ? size0 : - calculate_memsize(size, min_size+add_size, 0, + calculate_memsize(size, min_size, add_size, resource_size(b_res), min_align); if (!size0 && !size1) { if (b_res->start || b_res->end) @@ -699,7 +732,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0); + add_to_list(add_head, bus->self, b_res, size1-size0, min_align); return 1; } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index bc0e6eea0ff..e4261498fcf 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -129,16 +129,16 @@ void pci_disable_bridge_window(struct pci_dev *dev) } #endif /* CONFIG_PCI_QUIRKS */ + + static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, - int resno) + int resno, resource_size_t size, resource_size_t align) { struct resource *res = dev->resource + resno; - resource_size_t size, min, align; + resource_size_t min; int ret; - size = resource_size(res); min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; - align = pci_resource_alignment(dev, res); /* First, try exact prefetching match.. 
*/ ret = pci_bus_alloc_resource(bus, res, size, align, min, @@ -155,73 +155,51 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, pcibios_align_resource, dev); } + return ret; +} - if (ret < 0 && dev->fw_addr[resno]) { - struct resource *root, *conflict; - resource_size_t start, end; - - /* - * If we failed to assign anything, let's try the address - * where firmware left it. That at least has a chance of - * working, which is better than just leaving it disabled. - */ - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - else - root = &iomem_resource; - - start = res->start; - end = res->end; - res->start = dev->fw_addr[resno]; - res->end = res->start + size - 1; - dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", - resno, res); - conflict = request_resource_conflict(root, res); - if (conflict) { - dev_info(&dev->dev, - "BAR %d: %pR conflicts with %s %pR\n", resno, - res, conflict->name, conflict); - res->start = start; - res->end = end; - } else - ret = 0; - } +static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, + int resno, resource_size_t size) +{ + struct resource *root, *conflict; + resource_size_t start, end; + int ret = 0; - if (!ret) { - res->flags &= ~IORESOURCE_STARTALIGN; - dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); - if (resno < PCI_BRIDGE_RESOURCES) - pci_update_resource(dev, resno); + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + else + root = &iomem_resource; + + start = res->start; + end = res->end; + res->start = dev->fw_addr[resno]; + res->end = res->start + size - 1; + dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", + resno, res); + conflict = request_resource_conflict(root, res); + if (conflict) { + dev_info(&dev->dev, + "BAR %d: %pR conflicts with %s %pR\n", resno, + res, conflict->name, conflict); + res->start = start; + res->end = end; + ret = 1; } - return ret; } -int pci_assign_resource(struct pci_dev *dev, int resno) +static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align) { struct resource *res = dev->resource + resno; - resource_size_t align; struct pci_bus *bus; int ret; char *type; - align = pci_resource_alignment(dev, res); - if (!align) { - dev_info(&dev->dev, "BAR %d: can't assign %pR " - "(bogus alignment)\n", resno, res); - return -EINVAL; - } - bus = dev->bus; - while ((ret = __pci_assign_resource(bus, dev, resno))) { - if (bus->parent && bus->self->transparent) - bus = bus->parent; - else - bus = NULL; - if (bus) - continue; - break; + while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { + if (!bus->parent || !bus->self->transparent) + break; + bus = bus->parent; } if (ret) { @@ -242,6 +220,67 @@ int pci_assign_resource(struct pci_dev *dev, int resno) return ret; } +int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, + resource_size_t min_align) +{ + struct resource *res = dev->resource + resno; + resource_size_t new_size; + int ret; + + if (!res->parent) { + dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR " + "\n", resno, res); + return -EINVAL; + } + + /* already aligned with min_align */ + new_size = resource_size(res) + addsize; + ret = _pci_assign_resource(dev, resno, new_size, min_align); + if (!ret) { + res->flags &= ~IORESOURCE_STARTALIGN; + dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res); + if (resno < PCI_BRIDGE_RESOURCES) + 
pci_update_resource(dev, resno); + } + return ret; +} + +int pci_assign_resource(struct pci_dev *dev, int resno) +{ + struct resource *res = dev->resource + resno; + resource_size_t align, size; + struct pci_bus *bus; + int ret; + + align = pci_resource_alignment(dev, res); + if (!align) { + dev_info(&dev->dev, "BAR %d: can't assign %pR " + "(bogus alignment)\n", resno, res); + return -EINVAL; + } + + bus = dev->bus; + size = resource_size(res); + ret = _pci_assign_resource(dev, resno, size, align); + + /* + * If we failed to assign anything, let's try the address + * where firmware left it. That at least has a chance of + * working, which is better than just leaving it disabled. + */ + if (ret < 0 && dev->fw_addr[resno]) + ret = pci_revert_fw_address(res, dev, resno, size); + + if (!ret) { + res->flags &= ~IORESOURCE_STARTALIGN; + dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); + if (resno < PCI_BRIDGE_RESOURCES) + pci_update_resource(dev, resno); + } + return ret; +} + + /* Sort resources by alignment */ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) { diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 749c2a16012..1932029de48 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt) static int pcmcia_bus_early_resume(struct pcmcia_socket *skt) { - if (!verify_cis_cache(skt)) { - pcmcia_put_socket(skt); + if (!verify_cis_cache(skt)) return 0; - } dev_dbg(&skt->dev, "cis mismatch - different card\n"); diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index 81af2b3bcc0..097fa0092f5 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c @@ -222,7 +222,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) sharpsl_pcmcia_init_reset(skt); } -static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { +static struct pcmcia_low_level sharpsl_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = sharpsl_pcmcia_hw_init, .hw_shutdown = sharpsl_pcmcia_hw_shutdown, diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index 86e4a1a3c64..6bb02ab5dd9 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock) socket = &vrc4171_sockets[slot]; socket->csc_irq = search_nonuse_irq(); socket->io_irq = search_nonuse_irq(); + spin_lock_init(&socket->lock); return 0; } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index e1c4938b301..202b567a706 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -104,6 +104,7 @@ static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ + {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, @@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { .wireless = 2, }; +static struct quirk_entry quirk_lenovo_ideapad_s205 = { + .wireless = 3, +}; + /* The Aspire One has a dummy ACPI-WMI interface - disable it */ static struct dmi_system_id __devinitdata acer_blacklist[] = { { @@ -450,6 +455,24 @@ static struct dmi_system_id acer_quirks[] = { }, .driver_data = &quirk_medion_md_98300, }, + { + .callback = dmi_matched, + .ident = "Lenovo Ideapad 
S205", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), + }, + .driver_data = &quirk_lenovo_ideapad_s205, + }, + { + .callback = dmi_matched, + .ident = "Lenovo 3000 N200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"), + }, + .driver_data = &quirk_fujitsu_amilo_li_1718, + }, {} }; @@ -542,6 +565,12 @@ struct wmi_interface *iface) return AE_ERROR; *value = result & 0x1; return AE_OK; + case 3: + err = ec_read(0x78, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; default: err = ec_read(0xA, &result); if (err) @@ -648,6 +677,33 @@ static acpi_status AMW0_find_mailled(void) return AE_OK; } +static int AMW0_set_cap_acpi_check_device_found; + +static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, + u32 level, void *context, void **retval) +{ + AMW0_set_cap_acpi_check_device_found = 1; + return AE_OK; +} + +static const struct acpi_device_id norfkill_ids[] = { + { "VPC2004", 0}, + { "IBM0068", 0}, + { "LEN0068", 0}, + { "SNY5001", 0}, /* sony-laptop in charge */ + { "", 0}, +}; + +static int AMW0_set_cap_acpi_check_device(void) +{ + const struct acpi_device_id *id; + + for (id = norfkill_ids; id->id[0]; id++) + acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb, + NULL, NULL); + return AMW0_set_cap_acpi_check_device_found; +} + static acpi_status AMW0_set_capabilities(void) { struct wmab_args args; @@ -661,7 +717,9 @@ static acpi_status AMW0_set_capabilities(void) * work. */ if (wmi_has_guid(AMW0_GUID2)) { - interface->capability |= ACER_CAP_WIRELESS; + if ((quirks != &quirk_unknown) || + !AMW0_set_cap_acpi_check_device()) + interface->capability |= ACER_CAP_WIRELESS; return AE_OK; } @@ -1265,9 +1323,15 @@ static void acer_rfkill_update(struct work_struct *ignored) u32 state; acpi_status status; - status = get_u32(&state, ACER_CAP_WIRELESS); - if (ACPI_SUCCESS(status)) - rfkill_set_sw_state(wireless_rfkill, !state); + if (has_cap(ACER_CAP_WIRELESS)) { + status = get_u32(&state, ACER_CAP_WIRELESS); + if (ACPI_SUCCESS(status)) { + if (quirks->wireless == 3) + rfkill_set_hw_state(wireless_rfkill, !state); + else + rfkill_set_sw_state(wireless_rfkill, !state); + } + } if (has_cap(ACER_CAP_BLUETOOTH)) { status = get_u32(&state, ACER_CAP_BLUETOOTH); @@ -1334,19 +1398,24 @@ static struct rfkill *acer_rfkill_register(struct device *dev, static int acer_rfkill_init(struct device *dev) { - wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, - "acer-wireless", ACER_CAP_WIRELESS); - if (IS_ERR(wireless_rfkill)) - return PTR_ERR(wireless_rfkill); + int err; + + if (has_cap(ACER_CAP_WIRELESS)) { + wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, + "acer-wireless", ACER_CAP_WIRELESS); + if (IS_ERR(wireless_rfkill)) { + err = PTR_ERR(wireless_rfkill); + goto error_wireless; + } + } if (has_cap(ACER_CAP_BLUETOOTH)) { bluetooth_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_BLUETOOTH, "acer-bluetooth", ACER_CAP_BLUETOOTH); if (IS_ERR(bluetooth_rfkill)) { - rfkill_unregister(wireless_rfkill); - rfkill_destroy(wireless_rfkill); - return PTR_ERR(bluetooth_rfkill); + err = PTR_ERR(bluetooth_rfkill); + goto error_bluetooth; } } @@ -1355,30 +1424,44 @@ static int acer_rfkill_init(struct device *dev) RFKILL_TYPE_WWAN, "acer-threeg", ACER_CAP_THREEG); if (IS_ERR(threeg_rfkill)) { - rfkill_unregister(wireless_rfkill); - rfkill_destroy(wireless_rfkill); - rfkill_unregister(bluetooth_rfkill); - rfkill_destroy(bluetooth_rfkill); - return 
PTR_ERR(threeg_rfkill); + err = PTR_ERR(threeg_rfkill); + goto error_threeg; } } rfkill_inited = true; - if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) + if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && + has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); return 0; + +error_threeg: + if (has_cap(ACER_CAP_BLUETOOTH)) { + rfkill_unregister(bluetooth_rfkill); + rfkill_destroy(bluetooth_rfkill); + } +error_bluetooth: + if (has_cap(ACER_CAP_WIRELESS)) { + rfkill_unregister(wireless_rfkill); + rfkill_destroy(wireless_rfkill); + } +error_wireless: + return err; } static void acer_rfkill_exit(void) { - if (ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) + if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && + has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) cancel_delayed_work_sync(&acer_rfkill_work); - rfkill_unregister(wireless_rfkill); - rfkill_destroy(wireless_rfkill); + if (has_cap(ACER_CAP_WIRELESS)) { + rfkill_unregister(wireless_rfkill); + rfkill_destroy(wireless_rfkill); + } if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index d65df92e2ac..f0bf9c24480 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -643,12 +643,14 @@ static ssize_t show_infos(struct device *dev, /* * The HWRS method return informations about the hardware. * 0x80 bit is for WLAN, 0x100 for Bluetooth. + * 0x40 for WWAN, 0x10 for WIMAX. * The significance of others is yet to be found. - * If we don't find the method, we assume the device are present. + * We don't currently use this for device detection, and it + * takes several seconds to run on some systems. */ - rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp); if (!ACPI_FAILURE(rv)) - len += sprintf(page + len, "HRWS value : %#x\n", + len += sprintf(page + len, "HWRS value : %#x\n", (uint) temp); /* * Another value for userspace: the ASYM method returns 0x02 for @@ -1271,7 +1273,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *model = NULL; - unsigned long long bsts_result, hwrs_result; + unsigned long long bsts_result; char *string = NULL; acpi_status status; @@ -1333,17 +1335,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus) if (*string) pr_notice(" %s model detected\n", string); - /* - * The HWRS method return informations about the hardware. - * 0x80 bit is for WLAN, 0x100 for Bluetooth, - * 0x40 for WWAN, 0x10 for WIMAX. - * The significance of others is yet to be found. 
- */ - status = - acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result); - if (!ACPI_FAILURE(status)) - pr_notice(" HRWS returned %x", (int)hwrs_result); - if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) asus->have_rsts = true; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 0580d99b079..9827fe9c4da 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -68,6 +68,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x8A, { KEY_PROG1 } }, { KE_KEY, 0x95, { KEY_MEDIA } }, { KE_KEY, 0x99, { KEY_PHONE } }, + { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */ + { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */ + { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */ + { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */ { KE_KEY, 0xb5, { KEY_CALC } }, { KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 5ffe7c39814..e66bbba9929 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include @@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = { MODULE_DEVICE_TABLE(pci, ips_id_table); +static int ips_blacklist_callback(const struct dmi_system_id *id) +{ + pr_info("Blacklisted intel_ips for %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id ips_blacklist[] = { + { + .callback = ips_blacklist_callback, + .ident = "HP ProBook", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"), + }, + }, + { } /* terminating entry */ +}; + static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) { u64 platform_info; @@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) u16 htshi, trc, trc_required_mask; u8 tse; + if (dmi_check_system(ips_blacklist)) + return -ENODEV; + ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL); if (!ips) return -ENOMEM; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index bbd182e178c..35dae412635 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -127,7 +127,7 @@ MODULE_PARM_DESC(minor, "default is -1 (automatic)"); #endif -static int kbd_backlight; /* = 1 */ +static int kbd_backlight = 1; module_param(kbd_backlight, int, 0444); MODULE_PARM_DESC(kbd_backlight, "set this to 0 to disable keyboard backlight, " diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 26c5b117df2..aaba423fa02 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8656,6 +8656,13 @@ static int __must_check __init get_thinkpad_model_data( tp->model_str = kstrdup(s, GFP_KERNEL); if (!tp->model_str) return -ENOMEM; + } else { + s = dmi_get_system_info(DMI_BIOS_VENDOR); + if (s && !(strnicmp(s, "Lenovo", 6))) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } } s = dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index ca84d5099ce..8ac530a4c92 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -57,7 +57,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev) if (!(('0' <= (c) && (c) <= '9') 
|| ('A' <= (c) && (c) <= 'F'))) \ return 0 #define TEST_ALPHA(c) \ - if (!('@' <= (c) || (c) <= 'Z')) \ + if (!('A' <= (c) && (c) <= 'Z')) \ return 0 static int __init ispnpidacpi(const char *id) { @@ -94,6 +94,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) return -ENODEV; } + if (WARN_ON_ONCE(acpi_dev != dev->data)) + dev->data = acpi_dev; + ret = pnpacpi_build_resource_template(dev, &buffer); if (ret) return ret; @@ -320,9 +323,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp) { struct acpi_device *acpi = to_acpi_device(dev); struct pnp_dev *pnp = _pnp; + struct device *physical_device; + + physical_device = acpi_get_physical_device(acpi->handle); + if (physical_device) + put_device(physical_device); /* true means it matched */ - return !acpi_get_physical_device(acpi->handle) + return !physical_device && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); } diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index dfbd5a6cc58..258fef272ea 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev) } } +#ifdef CONFIG_AMD_NB + +#include + +static void quirk_amd_mmconfig_area(struct pnp_dev *dev) +{ + resource_size_t start, end; + struct pnp_resource *pnp_res; + struct resource *res; + struct resource mmconfig_res, *mmconfig; + + mmconfig = amd_get_mmconfig_range(&mmconfig_res); + if (!mmconfig) + return; + + list_for_each_entry(pnp_res, &dev->resources, list) { + res = &pnp_res->res; + if (res->end < mmconfig->start || res->start > mmconfig->end || + (res->start == mmconfig->start && res->end == mmconfig->end)) + continue; + + dev_info(&dev->dev, FW_BUG + "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n", + res, mmconfig); + if (mmconfig->start < res->start) { + start = mmconfig->start; + end = res->start - 1; + pnp_add_mem_resource(dev, start, end, 0); + } + if (mmconfig->end > res->end) { + start = res->end + 1; + end = mmconfig->end; + pnp_add_mem_resource(dev, start, end, 0); + } + break; + } +} +#endif + /* * PnP Quirks * Cards or devices that need some tweaking due to incomplete resource info @@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = { /* PnP resources that might overlap PCI BARs */ {"PNP0c01", quirk_system_pci_resources}, {"PNP0c02", quirk_system_pci_resources}, +#ifdef CONFIG_AMD_NB + {"PNP0c01", quirk_amd_mmconfig_area}, +#endif {""} }; diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index d63fddb0fbb..acda58e6ef0 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -195,7 +195,7 @@ static const unsigned int LDO12_suspend_table[] = { }; static const unsigned int LDO13_table[] = { - 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0, + 1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, }; static const unsigned int LDO13_suspend_table[] = { @@ -388,10 +388,10 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = { PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1), PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2), PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3), - PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4), + PM8607_LDO(10, LDO10, 0, 4, SUPPLIES_EN12, 4), PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5), PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0), - PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6), + PM8607_LDO(14, LDO14, 0, 3, SUPPLIES_EN12, 6), }; static int __devinit pm8607_regulator_probe(struct platform_device *pdev) diff --git 
a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index ad6628ca94f..a8fb668c03a 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -688,7 +688,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev, } new_val++; - } while (desc->min + desc->step + new_val <= desc->max); + } while (desc->min + desc->step * new_val <= desc->max); new_idx = tmp_idx; new_val = tmp_val; diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c index 9166aa0a9df..229b6f4bb8b 100644 --- a/drivers/regulator/tps6524x-regulator.c +++ b/drivers/regulator/tps6524x-regulator.c @@ -481,7 +481,7 @@ static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, if (i >= info->n_voltages) i = info->n_voltages - 1; - *selector = info->voltages[i]; + *selector = i; return write_field(hw, &info->voltage, i); } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index eb4c88316a1..636a2ec2181 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -227,11 +227,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) alarm->time.tm_hour = now.tm_hour; /* For simplicity, only support date rollover for now */ - if (alarm->time.tm_mday == -1) { + if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) { alarm->time.tm_mday = now.tm_mday; missing = day; } - if (alarm->time.tm_mon == -1) { + if ((unsigned)alarm->time.tm_mon >= 12) { alarm->time.tm_mon = now.tm_mon; if (missing == none) missing = month; @@ -762,6 +762,14 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) return 0; } +static void rtc_alarm_disable(struct rtc_device *rtc) +{ + if (!rtc->ops || !rtc->ops->alarm_irq_enable) + return; + + rtc->ops->alarm_irq_enable(rtc->dev.parent, false); +} + /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device @@ -783,8 +791,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) struct rtc_wkalrm alarm; int err; next = timerqueue_getnext(&rtc->timerqueue); - if (!next) + if (!next) { + rtc_alarm_disable(rtc); return; + } alarm.time = rtc_ktime_to_tm(next->expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); @@ -846,7 +856,8 @@ void rtc_timer_do_work(struct work_struct *work) err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) goto again; - } + } else + rtc_alarm_disable(rtc); mutex_unlock(&rtc->ops_lock); } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 911e75cdc12..cd61178ed09 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -805,9 +805,8 @@ static int cmos_suspend(struct device *dev) mask = RTC_IRQMASK; tmp &= ~mask; CMOS_WRITE(tmp, RTC_CONTROL); + hpet_mask_rtc_irq_bit(mask); - /* shut down hpet emulation - we don't need it for alarm */ - hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); cmos_checkintr(cmos, tmp); } spin_unlock_irq(&rtc_lock); @@ -872,6 +871,7 @@ static int cmos_resume(struct device *dev) rtc_update_irq(cmos->rtc, 1, mask); tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); + hpet_rtc_timer_init(); } while (mask & RTC_AIE); spin_unlock_irq(&rtc_lock); } diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c016327..d007609555c 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -391,6 +391,8 @@ static int dryice_rtc_probe(struct platform_device *pdev) if (imxdi->ioaddr == NULL) return -ENOMEM; + spin_lock_init(&imxdi->irq_lock); + imxdi->irq = platform_get_irq(pdev, 0); if (imxdi->irq < 0) 
return imxdi->irq; diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index da8beb8cae5..627b66aa78d 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; + struct rtc_device *rtc = i2c_get_clientdata(client); int handled = 0, sr, err; /* @@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data) if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); + rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 39e41fbdf08..51603543def 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id) struct platform_device *pdev = dev_id; struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr; + unsigned long flags; u32 status; u32 events = 0; - spin_lock_irq(&pdata->rtc->irq_lock); + spin_lock_irqsave(&pdata->rtc->irq_lock, flags); status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR); /* clear interrupt sources */ writew(status, ioaddr + RTC_RTCISR); @@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id) rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm); rtc_update_irq(pdata->rtc, 1, events); - spin_unlock_irq(&pdata->rtc->irq_lock); + spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags); return IRQ_HANDLED; } diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 71bab0ef544..646f6fe52cf 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -263,6 +263,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi) if (!(rxbuf[0] & 0x20)) { dev_err(&spi->dev, "chip not found\n"); + ret = -ENODEV; goto kfree_exit; } diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index ff1b84bd9bb..1e80a48057e 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) int ret; struct pl031_local *ldata; struct rtc_class_ops *ops = id->data; + unsigned long time; ret = amba_request_regions(adev, NULL); if (ret) @@ -339,11 +340,27 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision); /* Enable the clockwatch on ST Variants */ - if ((ldata->hw_designer == AMBA_VENDOR_ST) && - (ldata->hw_revision > 1)) + if (ldata->hw_designer == AMBA_VENDOR_ST) writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN, ldata->base + RTC_CR); + /* + * On ST PL031 variants, the RTC reset value does not provide correct + * weekday for 2000-01-01. Correct the erroneous sunday to saturday. 
+ */ + if (ldata->hw_designer == AMBA_VENDOR_ST) { + if (readl(ldata->base + RTC_YDR) == 0x2000) { + time = readl(ldata->base + RTC_DR); + if ((time & + (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK)) + == 0x02120000) { + time = time | (0x7 << RTC_WDAY_SHIFT); + writel(0x2000, ldata->base + RTC_YLR); + writel(time, ldata->base + RTC_LR); + } + } + } + ldata->rtc = rtc_device_register("pl031", &adev->dev, ops, THIS_MODULE); if (IS_ERR(ldata->rtc)) { diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c index 368d0e63cf8..15e6d5cb111 100644 --- a/drivers/rtc/rtc-rs5c348.c +++ b/drivers/rtc/rtc-rs5c348.c @@ -121,9 +121,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); if (!pdata->rtc_24h) { - tm->tm_hour %= 12; - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { + tm->tm_hour -= 20; + tm->tm_hour %= 12; tm->tm_hour += 12; + } else + tm->tm_hour %= 12; } tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c index f9a2799c44d..5e4e725440a 100644 --- a/drivers/rtc/rtc-twl.c +++ b/drivers/rtc/rtc-twl.c @@ -490,6 +490,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) goto out2; } + /* ensure interrupts are disabled, bootloaders can be strange */ + ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); + if (ret < 0) + dev_warn(&pdev->dev, "unable to disable interrupt\n"); + /* init cached IRQ enable bits */ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); if (ret < 0) diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c index efd6066b5cd..8a16f2c737a 100644 --- a/drivers/rtc/rtc-vt8500.c +++ b/drivers/rtc/rtc-vt8500.c @@ -69,7 +69,7 @@ | ALARM_SEC_BIT) #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ -#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ +#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ @@ -116,7 +116,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); - tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); + tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1; tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) + ((date >> DATE_CENTURY_S) & 1 ? 
200 : 100); tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; @@ -135,8 +135,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) } writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) - | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) - | (bin2bcd(tm->tm_mday)), + | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S) + | (bin2bcd(tm->tm_mday)) + | ((tm->tm_year >= 200) << DATE_CENTURY_S), vt8500_rtc->regbase + VT8500_RTC_DS); writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) @@ -246,7 +247,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev) } /* Enable RTC and set it to 24-hour mode */ - writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H, + writel(VT8500_RTC_CR_ENABLE, vt8500_rtc->regbase + VT8500_RTC_CR); vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index bdc909bd56d..f3c211099a1 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c @@ -24,7 +24,7 @@ #include #include #include - +#include /* * R16416 (0x4020) - RTC Write Counter @@ -96,6 +96,26 @@ struct wm831x_rtc { unsigned int alarm_enabled:1; }; +static void wm831x_rtc_add_randomness(struct wm831x *wm831x) +{ + int ret; + u16 reg; + + /* + * The write counter contains a pseudo-random number which is + * regenerated every time we set the RTC so it should be a + * useful per-system source of entropy. + */ + ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER); + if (ret >= 0) { + reg = ret; + add_device_randomness(®, sizeof(reg)); + } else { + dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n", + ret); + } +} + /* * Read current time and date in RTC */ @@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev) alm_irq, ret); } + wm831x_rtc_add_randomness(wm831x); + return 0; err: diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 30fb979d684..cc2dd7fb289 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -18,12 +18,12 @@ #include /* HDIO_GETGEO */ #include #include +#include #include #include #include #include -#include #include #include #include diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 72261e4c516..9caeaea5d09 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -13,6 +13,7 @@ #define KMSG_COMPONENT "dasd" #include +#include #include #include #include diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index f6489eb7e97..2150824303a 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index be55fb2b1b1..a0ce2a907b3 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -507,6 +507,8 @@ static void __init sclp_add_standby_memory(void) add_memory_merged(0); } +#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS) + static void __init insert_increment(u16 rn, int standby, int assigned) { struct memory_increment *incr, *new_incr; @@ -519,7 +521,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) new_incr->rn = rn; new_incr->standby = standby; if (!standby) - new_incr->usecount = 1; + new_incr->usecount = rzm > MEM_SCT_SIZE ? 
rzm/MEM_SCT_SIZE : 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 31a3ccbb649..84e569c9c15 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index e950f1ad4dd..ec760297dd3 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 07a4fd29f09..daa6b903aa9 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2) * Determine pathgroup state from PGID data. */ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, - int *mismatch, int *reserved, u8 *reset) + int *mismatch, u8 *reserved, u8 *reset) { struct pgid *pgid = &cdev->private->pgid[0]; struct pgid *first = NULL; @@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, if ((cdev->private->pgid_valid_mask & lpm) == 0) continue; if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) - *reserved = 1; + *reserved |= lpm; if (pgid_is_reset(pgid)) { *reset |= lpm; continue; @@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc) struct subchannel *sch = to_subchannel(cdev->dev.parent); struct pgid *pgid; int mismatch = 0; - int reserved = 0; + u8 reserved = 0; u8 reset = 0; u8 donepm; if (rc) goto out; pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); - if (reserved) + if (reserved == cdev->private->pgid_valid_mask) rc = -EUSERS; else if (mismatch) rc = -EOPNOTSUPP; @@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc) } out: CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " - "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid, + "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, cdev->private->pgid_todo_mask, mismatch, reserved, reset); switch (rc) { diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index aec60d55b10..481037d62b5 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -412,6 +412,26 @@ static void kvm_extint_handler(unsigned int ext_int_code, } } +/* + * For s390-virtio, we expect a page above main storage containing + * the virtio configuration. Try to actually load from this area + * in order to figure out if the host provides this page. 
+ */ +static int __init test_devices_support(unsigned long addr) +{ + int ret = -EIO; + + asm volatile( + "0: lura 0,%1\n" + "1: xgr %0,%0\n" + "2:\n" + EX_TABLE(0b,2b) + EX_TABLE(1b,2b) + : "+d" (ret) + : "a" (addr) + : "0", "cc"); + return ret; +} /* * Init function for virtio * devices are in a single page above top of "normal" mem @@ -423,21 +443,23 @@ static int __init kvm_devices_init(void) if (!MACHINE_IS_KVM) return -ENODEV; + if (test_devices_support(real_memory_size) < 0) + return -ENODEV; + + rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); + if (rc) + return rc; + + kvm_devices = (void *) real_memory_size; + kvm_root = root_device_register("kvm_s390"); if (IS_ERR(kvm_root)) { rc = PTR_ERR(kvm_root); printk(KERN_ERR "Could not register kvm_s390 root device"); + vmem_remove_mapping(real_memory_size, PAGE_SIZE); return rc; } - rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); - if (rc) { - root_device_unregister(kvm_root); - return rc; - } - - kvm_devices = (void *) real_memory_size; - INIT_WORK(&hotplug_work, hotplug_devices); service_subclass_irq_register(); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index fd69da3fa6b..e2c9ac5fcb3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2742,9 +2742,14 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; - - if (skb_dst(skb) && skb_dst(skb)->neighbour) { - cast_type = skb_dst(skb)->neighbour->type; + struct neighbour *n = NULL; + struct dst_entry *dst; + + dst = skb_dst(skb); + if (dst) + n = dst_get_neighbour(dst); + if (n) { + cast_type = n->type; if ((cast_type == RTN_BROADCAST) || (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) @@ -2787,6 +2792,9 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int ipv, int cast_type) { + struct neighbour *n = NULL; + struct dst_entry *dst; + memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.ext_flags = 0; @@ -2804,13 +2812,16 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); + dst = skb_dst(skb); + if (dst) + n = dst_get_neighbour(dst); if (ipv == 4) { /* IPv4 */ hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type); memset(hdr->hdr.l3.dest_addr, 0, 12); - if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { + if (n) { *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = - *((u32 *) skb_dst(skb)->neighbour->primary_key); + *((u32 *) n->primary_key); } else { /* fill in destination address used in ip header */ *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = @@ -2821,9 +2832,9 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type); if (card->info.type == QETH_CARD_TYPE_IQD) hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU; - if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) { + if (n) { memcpy(hdr->hdr.l3.dest_addr, - skb_dst(skb)->neighbour->primary_key, 16); + n->primary_key, 16); } else { /* fill in destination address used in ip header */ memcpy(hdr->hdr.l3.dest_addr, diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 645b0fcbb37..61da2cd2250 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ 
-518,6 +518,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, rwlock_init(&port->unit_list_lock); INIT_LIST_HEAD(&port->unit_list); + atomic_set(&port->units, 0); INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index e8b7cee6204..de1bcfa23f3 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -38,17 +38,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter) spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); } -static int zfcp_ccw_activate(struct ccw_device *cdev) - +/** + * zfcp_ccw_activate - activate adapter and wait for it to finish + * @cdev: pointer to belonging ccw device + * @clear: Status flags to clear. + * @tag: s390dbf trace record tag + */ +static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; + zfcp_erp_clear_adapter_status(adapter, clear); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccresu2"); + tag); zfcp_erp_wait(adapter); flush_work(&adapter->scan_work); @@ -163,32 +169,47 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; - zfcp_ccw_activate(cdev); + zfcp_ccw_activate(cdev, 0, "ccsonl1"); zfcp_ccw_adapter_put(adapter); return 0; } /** - * zfcp_ccw_set_offline - set_offline function of zfcp driver + * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish * @cdev: pointer to belonging ccw device + * @set: Status flags to set. + * @tag: s390dbf trace record tag * * This function gets called by the common i/o layer and sets an adapter * into state offline. */ -static int zfcp_ccw_set_offline(struct ccw_device *cdev) +static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag) { struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); if (!adapter) return 0; - zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1"); + zfcp_erp_set_adapter_status(adapter, set); + zfcp_erp_adapter_shutdown(adapter, 0, tag); zfcp_erp_wait(adapter); zfcp_ccw_adapter_put(adapter); return 0; } +/** + * zfcp_ccw_set_offline - set_offline function of zfcp driver + * @cdev: pointer to belonging ccw device + * + * This function gets called by the common i/o layer and sets an adapter + * into state offline. 
+ */ +static int zfcp_ccw_set_offline(struct ccw_device *cdev) +{ + return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1"); +} + /** * zfcp_ccw_notify - ccw notify function * @cdev: pointer to belonging ccw device @@ -206,6 +227,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) switch (event) { case CIO_GONE: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccnigo1", adapter); + break; + } dev_warn(&cdev->dev, "The FCP device has been detached\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); break; @@ -215,6 +241,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); break; case CIO_OPER: + if (atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */ + zfcp_dbf_hba_basic("ccniop1", adapter); + break; + } dev_info(&cdev->dev, "The FCP device is operational again\n"); zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); @@ -250,6 +281,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) zfcp_ccw_adapter_put(adapter); } +static int zfcp_ccw_suspend(struct ccw_device *cdev) +{ + zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1"); + return 0; +} + +static int zfcp_ccw_thaw(struct ccw_device *cdev) +{ + /* trace records for thaw and final shutdown during suspend + can only be found in system dump until the end of suspend + but not after resume because it's based on the memory image + right after the very first suspend (freeze) callback */ + zfcp_ccw_activate(cdev, 0, "ccthaw1"); + return 0; +} + +static int zfcp_ccw_resume(struct ccw_device *cdev) +{ + zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1"); + return 0; +} + struct ccw_driver zfcp_ccw_driver = { .driver = { .owner = THIS_MODULE, @@ -262,7 +315,7 @@ struct ccw_driver zfcp_ccw_driver = { .set_offline = zfcp_ccw_set_offline, .notify = zfcp_ccw_notify, .shutdown = zfcp_ccw_shutdown, - .freeze = zfcp_ccw_set_offline, - .thaw = zfcp_ccw_activate, - .restore = zfcp_ccw_activate, + .freeze = zfcp_ccw_suspend, + .thaw = zfcp_ccw_thaw, + .restore = zfcp_ccw_resume, }; diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index 303dde09d29..8ed63aa9abe 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -11,6 +11,7 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include @@ -292,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter) } read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, port->adapter->scsi_host) { + shost_for_each_device(sdev, adapter->scsi_host) { zfcp_sdev = sdev_to_zfcp(sdev); status = atomic_read(&zfcp_sdev->status); if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 96d1462e0bf..8b18dc04f06 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -163,6 +163,26 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) spin_unlock_irqrestore(&dbf->hba_lock, flags); } +/** + * zfcp_dbf_hba_basic - trace event for basic adapter events + * @adapter: pointer to struct zfcp_adapter + */ +void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_hba *rec = &dbf->hba_buf; + unsigned long flags; + + spin_lock_irqsave(&dbf->hba_lock, flags); + 
memset(rec, 0, sizeof(*rec)); + + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); + rec->id = ZFCP_DBF_HBA_BASIC; + + debug_event(dbf->hba, 1, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); +} + static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, struct zfcp_adapter *adapter, struct zfcp_port *port, diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 714f087eb7a..3ac7a4b30dd 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id { ZFCP_DBF_HBA_RES = 1, ZFCP_DBF_HBA_USS = 2, ZFCP_DBF_HBA_BIT = 3, + ZFCP_DBF_HBA_BASIC = 4, }; /** diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 527ba48eea5..ebbf7606c13 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -76,6 +76,7 @@ struct zfcp_reqlist; #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 +#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 @@ -203,6 +204,7 @@ struct zfcp_port { struct zfcp_adapter *adapter; /* adapter used to access port */ struct list_head unit_list; /* head of logical unit list */ rwlock_t unit_list_lock; /* unit list lock */ + atomic_t units; /* zfcp_unit count */ atomic_t status; /* status of this remote port */ u64 wwnn; /* WWNN if known */ u64 wwpn; /* WWPN */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 03627cfd81c..3ad6399cc8b 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -53,6 +53,7 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); +extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); @@ -157,6 +158,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); extern struct attribute_group zfcp_sysfs_unit_attrs; extern struct attribute_group zfcp_sysfs_adapter_attrs; extern struct attribute_group zfcp_sysfs_port_attrs; +extern struct mutex zfcp_sysfs_port_units_mutex; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 022fb6a8cb8..6e73bfe92da 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) return; } - zfcp_dbf_hba_fsf_uss("fssrh_2", req); + zfcp_dbf_hba_fsf_uss("fssrh_4", req); switch (sr_buf->status_type) { case FSF_STATUS_READ_PORT_CLOSED: @@ -771,12 +771,14 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) { struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = 
sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { @@ -885,7 +887,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_dbf_san_res("fsscth1", req); + zfcp_dbf_san_res("fsscth2", req); ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1730,13 +1732,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED | ZFCP_STATUS_LUN_SHARED | @@ -1847,11 +1851,13 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) { struct scsi_device *sdev = req->data; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); @@ -1941,7 +1947,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) { struct fsf_qual_latency_info *lat_in; struct latency_cont *lat = NULL; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device); + struct zfcp_scsi_dev *zfcp_sdev; struct zfcp_blk_drv_data blktrc; int ticks = req->adapter->timer_ticks; @@ -1956,6 +1962,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + zfcp_sdev = sdev_to_zfcp(scsi->device); blktrc.flags |= ZFCP_BLK_LAT_VALID; blktrc.channel_lat = lat_in->channel_lat * ticks; blktrc.fabric_lat = lat_in->fabric_lat * ticks; @@ -1993,12 +2000,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) { struct scsi_cmnd *scmnd = req->data; struct scsi_device *sdev = scmnd->device; - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_scsi_dev *zfcp_sdev; struct fsf_qtcb_header *header = &req->qtcb->header; if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) return; + zfcp_sdev = sdev_to_zfcp(sdev); + switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: case FSF_PORT_HANDLE_NOT_VALID: diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index cdc4ff78a7b..9e62210b294 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, zfcp_sysfs_port_rescan_store); +DEFINE_MUTEX(zfcp_sysfs_port_units_mutex); + static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, else retval = 0; + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) > 0) { + retval = -EBUSY; + mutex_unlock(&zfcp_sysfs_port_units_mutex); + goto out; + } + /* port is about to be removed, 
so no more unit_add */ + atomic_set(&port->units, -1); + mutex_unlock(&zfcp_sysfs_port_units_mutex); + write_lock_irq(&adapter->port_list_lock); list_del(&port->list); write_unlock_irq(&adapter->port_list_lock); @@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); u64 fcp_lun; + int retval; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) return -EINVAL; - if (zfcp_unit_add(port, fcp_lun)) - return -EINVAL; + retval = zfcp_unit_add(port, fcp_lun); + if (retval) + return retval; return count; } diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 20796ebc33c..4e6a5356bdb 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev) { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); - put_device(&unit->port->dev); + atomic_dec(&unit->port->units); kfree(unit); } @@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev) int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; + int retval = 0; + + mutex_lock(&zfcp_sysfs_port_units_mutex); + if (atomic_read(&port->units) == -1) { + /* port is already gone */ + retval = -ENODEV; + goto out; + } unit = zfcp_unit_find(port, fcp_lun); if (unit) { put_device(&unit->dev); - return -EEXIST; + retval = -EEXIST; + goto out; } unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); - if (!unit) - return -ENOMEM; + if (!unit) { + retval = -ENOMEM; + goto out; + } unit->port = port; unit->fcp_lun = fcp_lun; @@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) if (dev_set_name(&unit->dev, "0x%016llx", (unsigned long long) fcp_lun)) { kfree(unit); - return -ENOMEM; + retval = -ENOMEM; + goto out; } - get_device(&port->dev); - if (device_register(&unit->dev)) { put_device(&unit->dev); - return -ENOMEM; + retval = -ENOMEM; + goto out; } if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) { device_unregister(&unit->dev); - return -EINVAL; + retval = -EINVAL; + goto out; } + atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */ + write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); zfcp_unit_scsi_scan(unit); - return 0; +out: + mutex_unlock(&zfcp_sysfs_port_units_mutex); + return retval; } /** diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 7e6eca4a125..59fc5a1fdae 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -1174,7 +1174,16 @@ static void tscam(struct Scsi_Host *host) outw(val, tmport); outb(2, 0x80); TCM_SYNC: - udelay(0x800); + /* + * The funny division into multiple delays is to accomodate + * arches like ARM where udelay() multiplies its argument by + * a large number to initialize a loop counter. To avoid + * overflow, the maximum supported udelay is 2000 microseconds. + * + * XXX it would be more polite to find a way to use msleep() + */ + mdelay(2); + udelay(48); if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? 
*/ outw(0, tmport--); outb(0, tmport); diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5c54a2d9b83..ca397f80ff0 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1260,6 +1260,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) int rc = 0; u64 mask64; + memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1)); + memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2)); + bnx2i_adjust_qp_size(hba); iscsi_init.flags = diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index b2d661147a4..143f2682bda 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -985,7 +985,7 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); + csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index f3a4cd7cf78..ae13c4993aa 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1160,7 +1160,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0); + csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0); if (!csk->l2t) { pr_err("%s, cannot alloc l2t.\n", ndev->name); goto rel_resource; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index a2a9c7c6c64..77ac217ad5c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -492,7 +492,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - ndev = dst->neighbour->dev; + ndev = dst_get_neighbour(dst)->dev; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { pr_info("multi-cast route %pI4, port %u, dev %s.\n", @@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); mtu = ndev->mtu; pr_info("rt dev %s, loopback -> %s, mtu %u.\n", - dst->neighbour->dev->name, ndev->name, mtu); + dst_get_neighbour(dst)->dev->name, ndev->name, mtu); } cdev = cxgbi_device_find_by_netdev(ndev, &port); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index f5b718d3c31..aed7756bdbc 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb, dcb->max_command = 1; dcb->target_id = target; dcb->target_lun = lun; + dcb->dev_mode = eeprom->target[target].cfg0; #ifndef DC395x_NO_DISCONNECT dcb->identify_msg = IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun); #else dcb->identify_msg = IDENTIFY(0, lun); #endif - dcb->dev_mode = eeprom->target[target].cfg0; dcb->inquiry7 = 0; dcb->sync_mode = 0; dcb->min_nego_period = clock_period[period_index]; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6fec9fe5dc3..5391e6a1852 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -619,8 +619,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: - case TPGS_STATE_UNAVAILABLE: - /* Path unusable for unavailable/offline */ + 
/* Path unusable */ err = SCSI_DH_DEV_OFFLINED; break; default: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 56a9f3f676e..1e33d39a722 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1209,8 +1209,9 @@ static void complete_scsi_command(struct CommandList *cp) } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p has " - "protocol error \n", cp); + "protocol error\n", cp); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; @@ -1654,30 +1655,26 @@ static void figure_bus_target_lun(struct ctlr_info *h, if (is_logical_dev_addr_mode(lunaddrbytes)) { /* logical device */ - if (unlikely(is_scsi_rev_5(h))) { - /* p1210m, logical drives lun assignments - * match SCSI REPORT LUNS data. + lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); + if (is_msa2xxx(h, device)) { + /* msa2xxx way, put logicals on bus 1 + * and match target/lun numbers box + * reports. */ - lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); - *bus = 0; - *target = 0; - *lun = (lunid & 0x3fff) + 1; + *bus = 1; + *target = (lunid >> 16) & 0x3fff; + *lun = lunid & 0x00ff; } else { - /* not p1210m... */ - lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); - if (is_msa2xxx(h, device)) { - /* msa2xxx way, put logicals on bus 1 - * and match target/lun numbers box - * reports. - */ - *bus = 1; - *target = (lunid >> 16) & 0x3fff; - *lun = lunid & 0x00ff; + if (likely(is_scsi_rev_5(h))) { + /* All current smart arrays (circa 2011) */ + *bus = 0; + *target = 0; + *lun = (lunid & 0x3fff) + 1; } else { - /* Traditional smart array way. */ + /* Traditional old smart array way. */ *bus = 0; - *lun = 0; *target = lunid & 0x3fff; + *lun = 0; } } } else { @@ -2896,7 +2893,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; - c->Request.CDB[1] = 0x03; /* Reset target above */ + c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; /* If bytes 4-7 are zero, it means reset the */ /* LunID device */ c->Request.CDB[4] = 0x00; @@ -4037,10 +4034,10 @@ static int hpsa_request_irq(struct ctlr_info *h, if (h->msix_vector || h->msi_vector) rc = request_irq(h->intr[h->intr_mode], msixhandler, - IRQF_DISABLED, h->devname, h); + 0, h->devname, h); else rc = request_irq(h->intr[h->intr_mode], intxhandler, - IRQF_DISABLED, h->devname, h); + IRQF_SHARED, h->devname, h); if (rc) { dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 3d391dc3f11..36aca4b1ec0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1547,6 +1547,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, host_config = &evt_struct->iu.mad.host_config; + /* The transport length field is only 16-bit */ + length = min(0xffff, length); + /* Set up a lun reset SRP command */ memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 61e0d09e2b5..0365d580c97 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -454,11 +454,10 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic if (!orom) orom = isci_request_oprom(pdev); - for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { + for (i = 0; orom && i < num_controllers(pdev); i++) { if 
(sci_oem_parameters_validate(&orom->ctrl[i])) { dev_warn(&pdev->dev, "[%d]: invalid oem parameters detected, falling back to firmware\n", i); - devm_kfree(&pdev->dev, orom); orom = NULL; break; } diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index b5f4341de24..7cd637d501a 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) if (i >= len) { dev_err(&pdev->dev, "oprom parse error\n"); - devm_kfree(&pdev->dev, rom); rom = NULL; } pci_unmap_biosrom(oprom); diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 225b196800a..b70f9992b83 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -1693,7 +1693,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, frame_index, (void **)&frame_buffer); - sci_controller_copy_sata_response(&ireq->stp.req, + sci_controller_copy_sata_response(&ireq->stp.rsp, frame_header, frame_buffer); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 37cbe4d3bb9..d2f95761ba3 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -192,7 +192,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; - memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); + /* help some expanders that fail to zero sas_address in the 'no + * device' case + */ + if (phy->attached_dev_type == NO_DEVICE || + phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + else + memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; @@ -763,7 +770,7 @@ static struct domain_device *sas_ex_discover_end_dev( } /* See if this phy is part of a wide port */ -static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) +static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id) { struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; int i; @@ -779,11 +786,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id) sas_port_add_phy(ephy->port, phy->phy); phy->port = ephy->port; phy->phy_state = PHY_DEVICE_DISCOVERED; - return 0; + return true; } } - return -ENODEV; + return false; } static struct domain_device *sas_ex_discover_expander( @@ -921,8 +928,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) return res; } - res = sas_ex_join_wide_port(dev, phy_id); - if (!res) { + if (sas_ex_join_wide_port(dev, phy_id)) { SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", phy_id, SAS_ADDR(ex_phy->attached_sas_addr)); return res; @@ -967,8 +973,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == SAS_ADDR(child->sas_addr)) { ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; - res = sas_ex_join_wide_port(dev, i); - if (!res) + if (sas_ex_join_wide_port(dev, i)) SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n", i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr)); @@ -1632,9 +1637,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); - if (res) - goto out; - else if 
(phy_change_count != ex->ex_phy[i].phy_change_count) { + switch (res) { + case SMP_RESP_PHY_VACANT: + case SMP_RESP_NO_PHY: + continue; + case SMP_RESP_FUNC_ACC: + break; + default: + return res; + } + + if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = phy_change_count; @@ -1642,8 +1655,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, return 0; } } -out: - return res; + return 0; } static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) @@ -1824,32 +1836,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) { struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; struct domain_device *child; - bool found = false; - int res, i; + int res; SAS_DPRINTK("ex %016llx phy%d new device attached\n", SAS_ADDR(dev->sas_addr), phy_id); res = sas_ex_phy_discover(dev, phy_id); if (res) - goto out; - /* to support the wide port inserted */ - for (i = 0; i < dev->ex_dev.num_phys; i++) { - struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i]; - if (i == phy_id) - continue; - if (SAS_ADDR(ex_phy_temp->attached_sas_addr) == - SAS_ADDR(ex_phy->attached_sas_addr)) { - found = true; - break; - } - } - if (found) { - sas_ex_join_wide_port(dev, phy_id); + return res; + + if (sas_ex_join_wide_port(dev, phy_id)) return 0; - } + res = sas_ex_discover_devices(dev, phy_id); - if (!res) - goto out; + if (res) + return res; list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { @@ -1859,7 +1859,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) break; } } -out: return res; } @@ -1958,9 +1957,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) struct domain_device *dev = NULL; res = sas_find_bcast_dev(port_dev, &dev); - if (res) - goto out; - if (dev) { + while (res == 0 && dev) { struct expander_device *ex = &dev->ex_dev; int i = 0, phy_id; @@ -1972,8 +1969,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) res = sas_rediscover(dev, phy_id); i = phy_id + 1; } while (i < ex->num_phys); + + dev = NULL; + res = sas_find_bcast_dev(port_dev, &dev); } -out: return res; } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e6e30f4da1f..931cb11083c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4052,7 +4052,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); - spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); @@ -5380,6 +5379,8 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); + spin_lock_init(&poll_aen_lock); + support_poll_for_event = 2; support_device_change = 1; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 39e81cd567a..679fe6a773b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -66,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ +#define MAX_HBA_QUEUE_DEPTH 30000 +#define MAX_CHAIN_DEPTH 100000 static int max_queue_depth = -1; module_param(max_queue_depth, int, 0); MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); @@ -1095,6 
+1097,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) u16 message_control; + /* Check whether controller SAS2008 B0 controller, + if it is SAS2008 B0 controller use IO-APIC instead of MSIX */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == 0x01) { + return -EINVAL; + } + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); if (!base) { dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " @@ -2098,8 +2107,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) } if (ioc->chain_dma_pool) pci_pool_destroy(ioc->chain_dma_pool); - } - if (ioc->chain_lookup) { free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); ioc->chain_lookup = NULL; } @@ -2117,9 +2124,7 @@ static int _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) { struct mpt2sas_facts *facts; - u32 queue_size, queue_diff; u16 max_sge_elements; - u16 num_of_reply_frames; u16 chains_needed_per_io; u32 sz, total_sz; u32 retry_sz; @@ -2142,11 +2147,15 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } /* command line tunables for max controller queue depth */ - if (max_queue_depth != -1) - max_request_credit = (max_queue_depth < facts->RequestCredit) - ? max_queue_depth : facts->RequestCredit; - else - max_request_credit = facts->RequestCredit; + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else + max_request_credit = min_t(u16, facts->RequestCredit, + MAX_HBA_QUEUE_DEPTH); ioc->hba_queue_depth = max_request_credit; ioc->hi_priority_depth = facts->HighPriorityCredit; @@ -2187,50 +2196,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } ioc->chains_needed_per_io = chains_needed_per_io; - /* reply free queue sizing - taking into account for events */ - num_of_reply_frames = ioc->hba_queue_depth + 32; - - /* number of replies frames can't be a multiple of 16 */ - /* decrease number of reply frames by 1 */ - if (!(num_of_reply_frames % 16)) - num_of_reply_frames--; - - /* calculate number of reply free queue entries - * (must be multiple of 16) - */ - - /* (we know reply_free_queue_depth is not a multiple of 16) */ - queue_size = num_of_reply_frames; - queue_size += 16 - (queue_size % 16); - ioc->reply_free_queue_depth = queue_size; + /* reply free queue sizing - taking into account for 64 FW events */ + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; - /* reply descriptor post queue sizing */ - /* this size should be the number of request frames + number of reply - * frames - */ - - queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1; - /* round up to 16 byte boundary */ - if (queue_size % 16) - queue_size += 16 - (queue_size % 16); - - /* check against IOC maximum reply post queue depth */ - if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) { - queue_diff = queue_size - - facts->MaxReplyDescriptorPostQueueDepth; - - /* round queue_diff up to multiple of 16 */ - if (queue_diff % 16) - queue_diff += 16 - (queue_diff % 16); - - /* adjust hba_queue_depth, reply_free_queue_depth, - * and queue_size - */ - ioc->hba_queue_depth -= (queue_diff / 2); - ioc->reply_free_queue_depth -= (queue_diff / 2); - queue_size = facts->MaxReplyDescriptorPostQueueDepth; + /* align the reply post queue on the next 16 count boundary */ + if (!ioc->reply_free_queue_depth % 16) + 
ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16; + else + ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + + 32 - (ioc->reply_free_queue_depth % 16); + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = min_t(u16, + (facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16)), + (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16))); + ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16; + ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64; } - ioc->reply_post_queue_depth = queue_size; + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: " "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " @@ -2244,7 +2228,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth - (2); + ioc->shost->can_queue = ioc->scsiio_depth; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: " "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); @@ -2316,15 +2300,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) "depth(%d)\n", ioc->name, ioc->request, ioc->scsiio_depth)); - /* loop till the allocation succeeds */ - do { - sz = ioc->chain_depth * sizeof(struct chain_tracker); - ioc->chain_pages = get_order(sz); - ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( - GFP_KERNEL, ioc->chain_pages); - if (ioc->chain_lookup == NULL) - ioc->chain_depth -= 100; - } while (ioc->chain_lookup == NULL); + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); + sz = ioc->chain_depth * sizeof(struct chain_tracker); + ioc->chain_pages = get_order(sz); + + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( + GFP_KERNEL, ioc->chain_pages); ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, ioc->request_sz, 16, 0); if (!ioc->chain_dma_pool) { @@ -3085,7 +3066,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag) } pfacts = &ioc->pfacts[port]; - memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t)); + memset(pfacts, 0, sizeof(struct mpt2sas_port_facts)); pfacts->PortNumber = mpi_reply.PortNumber; pfacts->VP_ID = mpi_reply.VP_ID; pfacts->VF_ID = mpi_reply.VF_ID; @@ -3127,7 +3108,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } facts = &ioc->facts; - memset(facts, 0, sizeof(Mpi2IOCFactsReply_t)); + memset(facts, 0, sizeof(struct mpt2sas_facts)); facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); facts->VP_ID = mpi_reply.VP_ID; @@ -3808,7 +3789,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) goto out_free_resources; ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, - sizeof(Mpi2PortFactsReply_t), GFP_KERNEL); + sizeof(struct mpt2sas_port_facts), GFP_KERNEL); if (!ioc->pfacts) { r = -ENOMEM; goto out_free_resources; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index f88e52a1a39..aa51195a731 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -974,8 +974,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid) spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); if (list_empty(&ioc->free_chain_list)) { spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - printk(MPT2SAS_WARN_FMT "chain buffers not available\n", - ioc->name); + 
dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not " + "available\n", ioc->name)); return NULL; } chain_req = list_entry(ioc->free_chain_list.next, @@ -6425,6 +6425,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid, } else sas_target_priv_data = NULL; raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); starget_printk(KERN_INFO, raid_device->starget, "handle(0x%04x), wwid(0x%016llx)\n", handle, (unsigned long long)raid_device->wwid); @@ -6435,16 +6436,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid, */ _scsih_init_warpdrive_properties(ioc, raid_device); if (raid_device->handle == handle) - goto out; + return; printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", raid_device->handle); raid_device->handle = handle; if (sas_target_priv_data) sas_target_priv_data->handle = handle; - goto out; + return; } } - out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } @@ -7211,6 +7212,7 @@ _scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); + mpt2sas_base_detach(ioc); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 8835befe2c0..d72aa6182bc 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -193,21 +193,11 @@ struct mvs_prd { #define SPI_ADDR_VLD_94XX (1U << 1) #define SPI_CTRL_SpiStart_94XX (1U << 0) -#define mv_ffc(x) ffz(x) - static inline int mv_ffc64(u64 v) { - int i; - i = mv_ffc((u32)v); - if (i >= 0) - return i; - i = mv_ffc((u32)(v>>32)); - - if (i != 0) - return 32 + i; - - return -1; + u64 x = ~v; + return x ? __ffs64(x) : -1; } #define r_reg_set_enable(i) \ diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 1367d8b9350..efc6965ca67 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -73,7 +73,7 @@ extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ ((type == EDGE_DEV) || (type == FANOUT_DEV)) -#define bit(n) ((u32)1 << n) +#define bit(n) ((u64)1 << n) #define for_each_phy(__lseq_mask, __mc, __lseq) \ for ((__mc) = (__lseq_mask), (__lseq) = 0; \ diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index b31a8e3841d..d4ed9eb5265 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -69,10 +69,10 @@ #ifndef SCSI_OSD_MAJOR # define SCSI_OSD_MAJOR 260 #endif -#define SCSI_OSD_MAX_MINOR 64 +#define SCSI_OSD_MAX_MINOR MINORMASK static const char osd_name[] = "osd"; -static const char *osd_version_string = "open-osd 0.2.0"; +static const char *osd_version_string = "open-osd 0.2.1"; MODULE_AUTHOR("Boaz Harrosh "); MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a2a1a831b5f..7e78020b355 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3406,9 +3406,9 @@ qla2x00_do_dpc(void *data) base_vha->host_no)); } - if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) { + if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, + &base_vha->dpc_flags)) { qla2x00_update_fcports(base_vha); - clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); } if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a4b9cdbaaa0..198e4cda1e6 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -41,6 +41,8 @@ #include +static void scsi_eh_done(struct 
scsi_cmnd *scmd); + #define SENSE_TIMEOUT (10*HZ) /* @@ -240,6 +242,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ + if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; @@ -1665,6 +1675,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * requests are started. */ scsi_run_host_queues(shost); + + /* + * if eh is active and host_eh_scheduled is pending we need to re-run + * recovery. we do this check after scsi_run_host_queues() to allow + * everything pent up since the last eh run a chance to make forward + * progress before we sync again. Either we'll immediately re-run + * recovery or scsi_device_unbusy() will wake us again when these + * pending commands complete. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (shost->host_eh_scheduled) + if (scsi_host_set_state(shost, SHOST_RECOVERY)) + WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)); + spin_unlock_irqrestore(shost->host_lock, flags); } /** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 72ab1e6da64..dd454c4f99a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -481,15 +481,26 @@ void scsi_requeue_run_queue(struct work_struct *work) */ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) { + struct scsi_device *sdev = cmd->device; struct request *req = cmd->request; unsigned long flags; + /* + * We need to hold a reference on the device to avoid the queue being + * killed after the unlock and before scsi_run_queue is invoked which + * may happen because scsi_unprep_request() puts the command which + * releases its reference on the device. + */ + get_device(&sdev->sdev_gendev); + spin_lock_irqsave(q->queue_lock, flags); scsi_unprep_request(req); blk_requeue_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); scsi_run_queue(q); + + put_device(&sdev->sdev_gendev); } void scsi_next_command(struct scsi_cmnd *cmd) @@ -1380,16 +1391,19 @@ static int scsi_lld_busy(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; - struct scsi_target *starget; if (!sdev) return 0; shost = sdev->host; - starget = scsi_target(sdev); - if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || - scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) + /* + * Ignore host/starget busy state. + * Since block layer does not have a concept of fairness across + * multiple queues, congestion of host/starget needs to be handled + * in SCSI layer. + */ + if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) return 1; return 0; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index d70e91ae60a..122a5a2020a 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -6,6 +6,7 @@ */ #include +#include #include #include @@ -68,6 +69,19 @@ static int scsi_bus_resume_common(struct device *dev) return err; } +static int scsi_bus_prepare(struct device *dev) +{ + if (scsi_is_sdev_device(dev)) { + /* sd probing uses async_schedule. Wait until it finishes. 
*/ + async_synchronize_full(); + + } else if (scsi_is_host_device(dev)) { + /* Wait until async scanning is finished */ + scsi_complete_async_scans(); + } + return 0; +} + static int scsi_bus_suspend(struct device *dev) { return scsi_bus_suspend_common(dev, PMSG_SUSPEND); @@ -86,6 +100,7 @@ static int scsi_bus_poweroff(struct device *dev) #else /* CONFIG_PM_SLEEP */ #define scsi_bus_resume_common NULL +#define scsi_bus_prepare NULL #define scsi_bus_suspend NULL #define scsi_bus_freeze NULL #define scsi_bus_poweroff NULL @@ -194,6 +209,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost) #endif /* CONFIG_PM_RUNTIME */ const struct dev_pm_ops scsi_bus_pm_ops = { + .prepare = scsi_bus_prepare, .suspend = scsi_bus_suspend, .resume = scsi_bus_resume_common, .freeze = scsi_bus_freeze, diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 2a588955423..5b475d0832c 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void); #endif /* CONFIG_PROC_FS */ /* scsi_scan.c */ +extern int scsi_complete_async_scans(void); extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, int); extern void scsi_forget_host(struct Scsi_Host *); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index b3c6d957fbd..c6c80c9c960 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -776,6 +776,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); + if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + /* + * sata emulation layer device. This is a hack to work around + * the SATL power management specifications which state that + * when the SATL detects the device has gone into standby + * mode, it shall respond with NOT READY. 
+ */ + sdev->allow_restart = 1; + } + if (*bflags & BLIST_ISROM) { sdev->type = TYPE_ROM; sdev->removable = 1; @@ -1710,6 +1720,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost) { struct scsi_device *sdev; shost_for_each_device(sdev, shost) { + /* target removed before the device could be added */ + if (sdev->sdev_state == SDEV_DEL) + continue; if (!scsi_host_scan_allowed(shost) || scsi_sysfs_add_sdev(sdev) != 0) __scsi_remove_device(sdev); @@ -1815,6 +1828,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data) } spin_unlock(&async_scan_lock); + scsi_autopm_put_host(shost); scsi_host_put(shost); kfree(data); } @@ -1841,7 +1855,6 @@ static int do_scan_async(void *_data) do_scsi_scan_host(shost); scsi_finish_async_scan(data); - scsi_autopm_put_host(shost); return 0; } @@ -1869,7 +1882,7 @@ void scsi_scan_host(struct Scsi_Host *shost) p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); if (IS_ERR(p)) do_scan_async(data); - /* scsi_autopm_put_host(shost) is called in do_scan_async() */ + /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ } EXPORT_SYMBOL(scsi_scan_host); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index e0bd3f790fc..51d823f5465 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -962,7 +962,6 @@ static void __scsi_remove_target(struct scsi_target *starget) struct scsi_device *sdev; spin_lock_irqsave(shost->host_lock, flags); - starget->reap_ref++; restart: list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->channel != starget->channel || @@ -976,14 +975,6 @@ static void __scsi_remove_target(struct scsi_target *starget) goto restart; } spin_unlock_irqrestore(shost->host_lock, flags); - scsi_target_reap(starget); -} - -static int __remove_child (struct device * dev, void * data) -{ - if (scsi_is_target_device(dev)) - __scsi_remove_target(to_scsi_target(dev)); - return 0; } /** @@ -996,14 +987,32 @@ static int __remove_child (struct device * dev, void * data) */ void scsi_remove_target(struct device *dev) { - if (scsi_is_target_device(dev)) { - __scsi_remove_target(to_scsi_target(dev)); - return; + struct Scsi_Host *shost = dev_to_shost(dev->parent); + struct scsi_target *starget, *last = NULL; + unsigned long flags; + + /* remove targets being careful to lookup next entry before + * deleting the last + */ + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(starget, &shost->__targets, siblings) { + if (starget->state == STARGET_DEL) + continue; + if (starget->dev.parent == dev || &starget->dev == dev) { + /* assuming new targets arrive at the end */ + starget->reap_ref++; + spin_unlock_irqrestore(shost->host_lock, flags); + if (last) + scsi_target_reap(last); + last = starget; + __scsi_remove_target(starget); + spin_lock_irqsave(shost->host_lock, flags); + } } + spin_unlock_irqrestore(shost->host_lock, flags); - get_device(dev); - device_for_each_child(dev, NULL, __remove_child); - put_device(dev); + if (last) + scsi_target_reap(last); } EXPORT_SYMBOL(scsi_remove_target); diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 74708fcaf82..ae781487461 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c @@ -12,7 +12,7 @@ #include #include -#include +#include "scsi_priv.h" static int __init wait_scan_init(void) { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 953773cb26d..7db7eb74dc7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -138,6 +138,7 @@ 
sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + const char *temp = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -146,6 +147,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -158,6 +166,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -1073,6 +1088,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n", disk->disk_name, cmd)); + error = scsi_verify_blk_ioctl(bdev, cmd); + if (error < 0) + return error; + /* * If we are in the middle of error recovery, don't let anyone * else try and use this device. Also, if error recovery fails, it @@ -1095,7 +1114,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, error = scsi_ioctl(sdp, cmd, p); break; default: - error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p); + error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); if (error != -ENOTTY) break; error = scsi_ioctl(sdp, cmd, p); @@ -1265,6 +1284,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; + int ret; + + ret = scsi_verify_blk_ioctl(bdev, cmd); + if (ret < 0) + return -ENOIOCTLCMD; /* * If we are in the middle of error recovery, don't let anyone @@ -1276,8 +1300,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, return -ENODEV; if (sdev->host->hostt->compat_ioctl) { - int ret; - ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); return ret; @@ -2029,6 +2051,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2510,6 +2536,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2819,10 +2846,6 @@ static int __init init_sd(void) if (err) goto err_out; - err = scsi_register_driver(&sd_template.gendrv); - if (err) - goto err_out_class; - sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE, 0, 0, NULL); if (!sd_cdb_cache) { @@ -2836,8 +2859,15 @@ static int __init init_sd(void) goto err_out_cache; } + err = scsi_register_driver(&sd_template.gendrv); + if (err) + goto err_out_driver; + return 0; +err_out_driver: + mempool_destroy(sd_cdb_pool); + err_out_cache: kmem_cache_destroy(sd_cdb_cache); @@ -2860,10 +2890,10 @@ static void __exit exit_sd(void) SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n")); + scsi_unregister_driver(&sd_template.gendrv); mempool_destroy(sd_cdb_pool); kmem_cache_destroy(sd_cdb_cache); - 
scsi_unregister_driver(&sd_template.gendrv); class_unregister(&sd_disk_class); for (i = 0; i < SD_MAJORS; i++) diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 6ad798bfd52..812f1baa54e 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -70,6 +70,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index b4543f575f4..36d1ed7817e 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev) struct sym_lcb *lp = sym_lp(tp, sdev->lun); unsigned long flags; + /* if slave_alloc returned before allocating a sym_lcb, return */ + if (!lp) + return; + spin_lock_irqsave(np->s.host->host_lock, flags); if (lp->busy_itlq || lp->busy_itl) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index a1684df9ea5..5998a8b9e22 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -318,7 +318,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master) } spi->master = master; - spi->dev.parent = dev; + spi->dev.parent = &master->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; device_initialize(&spi->dev); diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi_fsl_spi.c index 7963c9b4956..a725b07276e 100644 --- a/drivers/spi/spi_fsl_spi.c +++ b/drivers/spi/spi_fsl_spi.c @@ -139,10 +139,12 @@ static void fsl_spi_change_mode(struct spi_device *spi) static void fsl_spi_chipselect(struct spi_device *spi, int value) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; + struct fsl_spi_platform_data *pdata; bool pol = spi->mode & SPI_CS_HIGH; struct spi_mpc8xxx_cs *cs = spi->controller_state; + pdata = spi->dev.parent->parent->platform_data; + if (value == BITBANG_CS_INACTIVE) { if (pdata->cs_control) pdata->cs_control(spi, !pol); @@ -934,7 +936,7 @@ static struct spi_master * __devinit fsl_spi_probe(struct device *dev, static void fsl_spi_cs_control(struct spi_device *spi, bool on) { - struct device *dev = spi->dev.parent; + struct device *dev = spi->dev.parent->parent; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); u16 cs = spi->chip_select; int gpio = pinfo->gpios[cs]; diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c index 7bb7da7959a..63bafbb0980 100644 --- a/drivers/staging/asus_oled/asus_oled.c +++ b/drivers/staging/asus_oled/asus_oled.c @@ -355,7 +355,14 @@ static void send_data(struct asus_oled_dev *odev) static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count) { - while (count-- > 0 && val) { + odev->last_val = val; + + if (val == 0) { + odev->buf_offs += count; + return 0; + } + + while (count-- > 0) { size_t x = odev->buf_offs % odev->width; size_t y = odev->buf_offs / odev->width; size_t i; @@ -406,7 +413,6 @@ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count) ; } - odev->last_val = val; odev->buf_offs++; } @@ -805,10 +811,9 @@ static int __init asus_oled_init(void) static void __exit asus_oled_exit(void) { + usb_deregister(&oled_driver); 
class_remove_file(oled_class, &class_attr_version.attr); class_destroy(oled_class); - - usb_deregister(&oled_driver); } module_init(asus_oled_init); diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c b/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c index 45349261061..934e7f9a867 100644 --- a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c +++ b/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c @@ -143,7 +143,6 @@ static bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw); static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw); static void wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init); static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw); -static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags); static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw); static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw); static u32 wlc_wlintrsoff(struct wlc_info *wlc); @@ -2725,7 +2724,7 @@ void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask) W_REG(&wlc_hw->regs->macintmask, wlc->macintmask); } -static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags) +void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags) { u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h b/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h index a5dccc273ac..a2a4e7328ee 100644 --- a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h +++ b/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h @@ -103,6 +103,7 @@ extern void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk); extern void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw); extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags); extern void wlc_bmac_reset(struct wlc_hw_info *wlc_hw); +extern void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags); extern void wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec, bool mute); extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw); diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.c b/drivers/staging/brcm80211/brcmsmac/wlc_main.c index 4b4a31eff90..99250e29461 100644 --- a/drivers/staging/brcm80211/brcmsmac/wlc_main.c +++ b/drivers/staging/brcm80211/brcmsmac/wlc_main.c @@ -6145,6 +6145,7 @@ wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p) { int len_mpdu; struct ieee80211_rx_status rx_status; + struct ieee80211_hdr *hdr; memset(&rx_status, 0, sizeof(rx_status)); prep_mac80211_status(wlc, rxh, p, &rx_status); @@ -6154,6 +6155,13 @@ wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p) skb_pull(p, D11_PHY_HDR_LEN); __skb_trim(p, len_mpdu); + /* unmute transmit */ + if (wlc->hw->suspended_fifos) { + hdr = (struct ieee80211_hdr *)p->data; + if (ieee80211_is_beacon(hdr->frame_control)) + wlc_bmac_mute(wlc->hw, false, 0); + } + memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); return; diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig index 20008a4376e..727b207d8d7 100644 --- a/drivers/staging/comedi/Kconfig +++ b/drivers/staging/comedi/Kconfig @@ -424,6 +424,7 @@ config COMEDI_ADQ12B config COMEDI_NI_AT_A2150 tristate "NI AT-A2150 ISA card support" + select COMEDI_FC depends on COMEDI_NI_COMMON depends on VIRT_TO_BUS default N diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 63e50f76182..ee33cba59a4 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ 
b/drivers/staging/comedi/comedi_fops.c @@ -136,8 +136,16 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, /* Device config is special, because it must work on * an unconfigured device. */ if (cmd == COMEDI_DEVCONFIG) { + if (minor >= COMEDI_NUM_BOARD_MINORS) { + /* Device config not appropriate on non-board minors. */ + rc = -ENOTTY; + goto done; + } rc = do_devconfig_ioctl(dev, (struct comedi_devconfig __user *)arg); + if (rc == 0) + /* Evade comedi_auto_unconfig(). */ + dev_file_info->hardware_device = NULL; goto done; } @@ -280,7 +288,7 @@ static int do_devconfig_ioctl(struct comedi_device *dev, if (ret == 0) { if (!try_module_get(dev->driver->module)) { comedi_device_detach(dev); - return -ENOSYS; + ret = -ENOSYS; } } @@ -843,7 +851,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn, ret = -EAGAIN; break; } - ret = s->async->inttrig(dev, s, insn->data[0]); + ret = s->async->inttrig(dev, s, data[0]); if (ret >= 0) ret = 1; break; @@ -1088,7 +1096,6 @@ static int do_cmd_ioctl(struct comedi_device *dev, goto cleanup; } - kfree(async->cmd.chanlist); async->cmd = user_cmd; async->cmd.data = NULL; /* load channel/gain list */ @@ -1570,7 +1577,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) mask = 0; read_subdev = comedi_get_read_subdevice(dev_file_info); - if (read_subdev) { + if (read_subdev && read_subdev->async) { poll_wait(file, &read_subdev->async->wait_head, wait); if (!read_subdev->busy || comedi_buf_read_n_available(read_subdev->async) > 0 @@ -1580,7 +1587,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait) } } write_subdev = comedi_get_write_subdevice(dev_file_info); - if (write_subdev) { + if (write_subdev && write_subdev->async) { poll_wait(file, &write_subdev->async->wait_head, wait); comedi_buf_write_alloc(write_subdev->async, write_subdev->async->prealloc_bufsz); @@ -1622,7 +1629,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, } s = comedi_get_write_subdevice(dev_file_info); - if (s == NULL) { + if (s == NULL || s->async == NULL) { retval = -EIO; goto done; } @@ -1733,7 +1740,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, } s = comedi_get_read_subdevice(dev_file_info); - if (s == NULL) { + if (s == NULL || s->async == NULL) { retval = -EIO; goto done; } @@ -1833,6 +1840,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s) if (async) { comedi_reset_async_buf(async); async->inttrig = NULL; + kfree(async->cmd.chanlist); + async->cmd.chanlist = NULL; } else { printk(KERN_ERR "BUG: (?) 
do_become_nonbusy called with async=0\n"); @@ -2206,6 +2215,7 @@ int comedi_alloc_board_minor(struct device *hardware_device) kfree(info); return -ENOMEM; } + info->hardware_device = hardware_device; comedi_device_init(info->device); spin_lock_irqsave(&comedi_file_info_table_lock, flags); for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) { @@ -2295,6 +2305,23 @@ void comedi_free_board_minor(unsigned minor) } } +int comedi_find_board_minor(struct device *hardware_device) +{ + int minor; + struct comedi_device_file_info *info; + + for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) { + spin_lock(&comedi_file_info_table_lock); + info = comedi_file_info_table[minor]; + if (info && info->hardware_device == hardware_device) { + spin_unlock(&comedi_file_info_table_lock); + return minor; + } + spin_unlock(&comedi_file_info_table_lock); + } + return -ENODEV; +} + int comedi_alloc_subdevice_minor(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h index 68aa9176d24..5f2745e5974 100644 --- a/drivers/staging/comedi/comedidev.h +++ b/drivers/staging/comedi/comedidev.h @@ -237,6 +237,7 @@ struct comedi_device_file_info { struct comedi_device *device; struct comedi_subdevice *read_subdevice; struct comedi_subdevice *write_subdevice; + struct device *hardware_device; }; #ifdef CONFIG_COMEDI_DEBUG diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index 6d60e91b3a8..f9b028604cb 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -819,25 +819,14 @@ static int comedi_auto_config(struct device *hardware_device, int minor; struct comedi_device_file_info *dev_file_info; int retval; - unsigned *private_data = NULL; - if (!comedi_autoconfig) { - dev_set_drvdata(hardware_device, NULL); + if (!comedi_autoconfig) return 0; - } minor = comedi_alloc_board_minor(hardware_device); if (minor < 0) return minor; - private_data = kmalloc(sizeof(unsigned), GFP_KERNEL); - if (private_data == NULL) { - retval = -ENOMEM; - goto cleanup; - } - *private_data = minor; - dev_set_drvdata(hardware_device, private_data); - dev_file_info = comedi_get_device_file_info(minor); memset(&it, 0, sizeof(it)); @@ -850,25 +839,22 @@ static int comedi_auto_config(struct device *hardware_device, retval = comedi_device_attach(dev_file_info->device, &it); mutex_unlock(&dev_file_info->device->mutex); -cleanup: - if (retval < 0) { - kfree(private_data); + if (retval < 0) comedi_free_board_minor(minor); - } return retval; } static void comedi_auto_unconfig(struct device *hardware_device) { - unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device); - if (minor == NULL) - return; - - BUG_ON(*minor >= COMEDI_NUM_BOARD_MINORS); + int minor; - comedi_free_board_minor(*minor); - dev_set_drvdata(hardware_device, NULL); - kfree(minor); + if (hardware_device == NULL) + return; + minor = comedi_find_board_minor(hardware_device); + if (minor < 0) + return; + BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS); + comedi_free_board_minor(minor); } int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name) diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c index 48246cd50d4..b4311bf66e6 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236.c +++ b/drivers/staging/comedi/drivers/amplc_pc236.c @@ -470,7 +470,7 @@ static int pc236_detach(struct comedi_device *dev) { printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, PC236_DRIVER_NAME); - if 
(devpriv) + if (dev->iobase) pc236_intr_disable(dev); if (dev->irq) diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c index a804742b802..2567f9ab416 100644 --- a/drivers/staging/comedi/drivers/comedi_test.c +++ b/drivers/staging/comedi/drivers/comedi_test.c @@ -461,7 +461,7 @@ static int waveform_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->timer_running = 0; - del_timer(&devpriv->timer); + del_timer_sync(&devpriv->timer); return 0; } diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c index 3141dc80fe7..966b6936443 100644 --- a/drivers/staging/comedi/drivers/das08.c +++ b/drivers/staging/comedi/drivers/das08.c @@ -655,7 +655,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev, int chan; lsb = data[0] & 0xff; - msb = (data[0] >> 8) & 0xf; + msb = (data[0] >> 8) & 0xff; chan = CR_CHAN(insn->chanspec); diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index 8d98cf41270..c8b7eed07f9 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -913,7 +913,7 @@ static int jr3_pci_attach(struct comedi_device *dev, } /* Reset DSP card */ - devpriv->iobase->channel[0].reset = 0; + writel(0, &devpriv->iobase->channel[0].reset); result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware); printk("Firmare load %d\n", result); diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index ab8f37022a3..897359d79e5 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -1241,7 +1241,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) else channel = CR_CHAN(cmd->chanlist[0]); /* munge channel bits for differential / scan disabled mode */ - if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF) + if ((labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN || + labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN_INTERVAL) && + aref == AREF_DIFF) channel *= 2; devpriv->command1_bits |= ADC_CHAN_BITS(channel); devpriv->command1_bits |= thisboard->ai_range_code[range]; @@ -1257,21 +1259,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->write_byte(devpriv->command1_bits, dev->iobase + COMMAND1_REG); } - /* setup any external triggering/pacing (command4 register) */ - devpriv->command4_bits = 0; - if (cmd->convert_src != TRIG_EXT) - devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT; - /* XXX should discard first scan when using interval scanning - * since manual says it is not synced with scan clock */ - if (labpc_use_continuous_mode(cmd) == 0) { - devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; - if (cmd->scan_begin_src == TRIG_EXT) - devpriv->command4_bits |= EXT_SCAN_EN_BIT; - } - /* single-ended/differential */ - if (aref == AREF_DIFF) - devpriv->command4_bits |= ADC_DIFF_BIT; - devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); devpriv->write_byte(cmd->chanlist_len, dev->iobase + INTERVAL_COUNT_REG); @@ -1349,6 +1336,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT; devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG); + /* setup any external triggering/pacing (command4 register) */ + devpriv->command4_bits = 0; + if (cmd->convert_src != TRIG_EXT) + devpriv->command4_bits |= 
EXT_CONVERT_DISABLE_BIT; + /* XXX should discard first scan when using interval scanning + * since manual says it is not synced with scan clock */ + if (labpc_use_continuous_mode(cmd) == 0) { + devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT; + if (cmd->scan_begin_src == TRIG_EXT) + devpriv->command4_bits |= EXT_SCAN_EN_BIT; + } + /* single-ended/differential */ + if (aref == AREF_DIFF) + devpriv->command4_bits |= ADC_DIFF_BIT; + devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG); + /* startup acquisition */ /* command2 reg */ diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 23fc64b9988..42cad5c0729 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c @@ -1882,7 +1882,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) case TRIG_NONE: /* continous acquisition */ devpriv->ai_continous = 1; - devpriv->ai_sample_count = 0; + devpriv->ai_sample_count = 1; break; } @@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev, /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */ k->SetMode(dev, k, Setup, TRUE); - Preload(dev, k, *(insn->data)); + Preload(dev, k, data[0]); k->PulseIndex(dev, k); SetLatchSource(dev, k, valueSrclatch); k->SetEnable(dev, k, (uint16_t) (enab != 0)); diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h index 434ce343336..4208fb4cf0f 100644 --- a/drivers/staging/comedi/internal.h +++ b/drivers/staging/comedi/internal.h @@ -7,6 +7,7 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); int comedi_alloc_board_minor(struct device *hardware_device); void comedi_free_board_minor(unsigned minor); +int comedi_find_board_minor(struct device *hardware_device); void comedi_reset_async_buf(struct comedi_async *async); int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s, unsigned long new_size); diff --git a/drivers/staging/hv/hv_kvp.c b/drivers/staging/hv/hv_kvp.c index 13b0ecf7d5d..9f8efd4c5b6 100644 --- a/drivers/staging/hv/hv_kvp.c +++ b/drivers/staging/hv/hv_kvp.c @@ -201,11 +201,13 @@ kvp_respond_to_host(char *key, char *value, int error) * The windows host expects the key/value pair to be encoded * in utf16. */ - keylen = utf8s_to_utf16s(key_name, strlen(key_name), - (wchar_t *)kvp_data->data.key); + keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN, + (wchar_t *) kvp_data->data.key, + HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2); kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ - valuelen = utf8s_to_utf16s(value, strlen(value), - (wchar_t *)kvp_data->data.value); + valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN, + (wchar_t *) kvp_data->data.value, + HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2); kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c index dd9a3bb6aa0..a1176d91312 100644 --- a/drivers/staging/iio/magnetometer/hmc5843.c +++ b/drivers/staging/iio/magnetometer/hmc5843.c @@ -520,7 +520,9 @@ static int hmc5843_detect(struct i2c_client *client, /* Called when we have found a new HMC5843. 
*/ static void hmc5843_init_client(struct i2c_client *client) { - struct hmc5843_data *data = i2c_get_clientdata(client); + struct iio_dev *indio_dev = i2c_get_clientdata(client); + struct hmc5843_data *data = iio_priv(indio_dev); + hmc5843_set_meas_conf(client, data->meas_conf); hmc5843_set_rate(client, data->rate); hmc5843_configure(client, data->operating_mode); diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c index 805df913bb6..21cbc9ae79c 100644 --- a/drivers/staging/lirc/lirc_serial.c +++ b/drivers/staging/lirc/lirc_serial.c @@ -836,25 +836,22 @@ static int hardware_init_port(void) return 0; } -static int init_port(void) +static int __devinit lirc_serial_probe(struct platform_device *dev) { int i, nlow, nhigh, result; result = request_irq(irq, irq_handler, IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0), LIRC_DRIVER_NAME, (void *)&hardware); - - switch (result) { - case -EBUSY: - printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq); - return -EBUSY; - case -EINVAL: - printk(KERN_ERR LIRC_DRIVER_NAME - ": Bad irq number or handler\n"); - return -EINVAL; - default: - break; - }; + if (result < 0) { + if (result == -EBUSY) + printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", + irq); + else if (result == -EINVAL) + printk(KERN_ERR LIRC_DRIVER_NAME + ": Bad irq number or handler\n"); + return result; + } /* Reserve io region. */ /* @@ -875,11 +872,14 @@ static int init_port(void) ": or compile the serial port driver as module and\n"); printk(KERN_WARNING LIRC_DRIVER_NAME ": make sure this module is loaded first\n"); - return -EBUSY; + result = -EBUSY; + goto exit_free_irq; } - if (hardware_init_port() < 0) - return -EINVAL; + if (hardware_init_port() < 0) { + result = -EINVAL; + goto exit_release_region; + } /* Initialize pulse/space widths */ init_timing_params(duty_cycle, freq); @@ -911,6 +911,28 @@ static int init_port(void) dprintk("Interrupt %d, port %04x obtained\n", irq, io); return 0; + +exit_release_region: + if (iommap != 0) + release_mem_region(iommap, 8 << ioshift); + else + release_region(io, 8); +exit_free_irq: + free_irq(irq, (void *)&hardware); + + return result; +} + +static int __devexit lirc_serial_remove(struct platform_device *dev) +{ + free_irq(irq, (void *)&hardware); + + if (iommap != 0) + release_mem_region(iommap, 8 << ioshift); + else + release_region(io, 8); + + return 0; } static int set_use_inc(void *data) @@ -1076,16 +1098,6 @@ static struct lirc_driver driver = { static struct platform_device *lirc_serial_dev; -static int __devinit lirc_serial_probe(struct platform_device *dev) -{ - return 0; -} - -static int __devexit lirc_serial_remove(struct platform_device *dev) -{ - return 0; -} - static int lirc_serial_suspend(struct platform_device *dev, pm_message_t state) { @@ -1112,10 +1124,8 @@ static int lirc_serial_resume(struct platform_device *dev) { unsigned long flags; - if (hardware_init_port() < 0) { - lirc_serial_exit(); + if (hardware_init_port() < 0) return -EINVAL; - } spin_lock_irqsave(&hardware[type].lock, flags); /* Enable Interrupt */ @@ -1188,10 +1198,6 @@ static int __init lirc_serial_init_module(void) { int result; - result = lirc_serial_init(); - if (result) - return result; - switch (type) { case LIRC_HOMEBREW: case LIRC_IRDEO: @@ -1211,8 +1217,7 @@ static int __init lirc_serial_init_module(void) break; #endif default: - result = -EINVAL; - goto exit_serial_exit; + return -EINVAL; } if (!softcarrier) { switch (type) { @@ -1228,37 +1233,26 @@ static int __init lirc_serial_init_module(void) } } - result = 
init_port(); - if (result < 0) - goto exit_serial_exit; + result = lirc_serial_init(); + if (result) + return result; + driver.features = hardware[type].features; driver.dev = &lirc_serial_dev->dev; driver.minor = lirc_register_driver(&driver); if (driver.minor < 0) { printk(KERN_ERR LIRC_DRIVER_NAME ": register_chrdev failed!\n"); - result = -EIO; - goto exit_release; + lirc_serial_exit(); + return -EIO; } return 0; -exit_release: - release_region(io, 8); -exit_serial_exit: - lirc_serial_exit(); - return result; } static void __exit lirc_serial_exit_module(void) { - lirc_serial_exit(); - - free_irq(irq, (void *)&hardware); - - if (iommap != 0) - release_mem_region(iommap, 8 << ioshift); - else - release_region(io, 8); lirc_unregister_driver(driver.minor); + lirc_serial_exit(); dprintk("cleaned up module\n"); } diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c index 0d3864594b1..39bb66b79fe 100644 --- a/drivers/staging/lirc/lirc_sir.c +++ b/drivers/staging/lirc/lirc_sir.c @@ -53,6 +53,7 @@ #include #include #include +#include #ifdef LIRC_ON_SA1100 #include #ifdef CONFIG_SA1100_COLLIE @@ -488,9 +489,11 @@ static struct lirc_driver driver = { .owner = THIS_MODULE, }; +static struct platform_device *lirc_sir_dev; static int init_chrdev(void) { + driver.dev = &lirc_sir_dev->dev; driver.minor = lirc_register_driver(&driver); if (driver.minor < 0) { printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n"); @@ -1216,20 +1219,71 @@ static int init_lirc_sir(void) return 0; } +static int __devinit lirc_sir_probe(struct platform_device *dev) +{ + return 0; +} + +static int __devexit lirc_sir_remove(struct platform_device *dev) +{ + return 0; +} + +static struct platform_driver lirc_sir_driver = { + .probe = lirc_sir_probe, + .remove = __devexit_p(lirc_sir_remove), + .driver = { + .name = "lirc_sir", + .owner = THIS_MODULE, + }, +}; static int __init lirc_sir_init(void) { int retval; + retval = platform_driver_register(&lirc_sir_driver); + if (retval) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register " + "failed!\n"); + return -ENODEV; + } + + lirc_sir_dev = platform_device_alloc("lirc_dev", 0); + if (!lirc_sir_dev) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc " + "failed!\n"); + retval = -ENOMEM; + goto pdev_alloc_fail; + } + + retval = platform_device_add(lirc_sir_dev); + if (retval) { + printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add " + "failed!\n"); + retval = -ENODEV; + goto pdev_add_fail; + } + retval = init_chrdev(); if (retval < 0) - return retval; + goto fail; + retval = init_lirc_sir(); if (retval) { drop_chrdev(); - return retval; + goto fail; } + return 0; + +fail: + platform_device_del(lirc_sir_dev); +pdev_add_fail: + platform_device_put(lirc_sir_dev); +pdev_alloc_fail: + platform_driver_unregister(&lirc_sir_driver); + return retval; } static void __exit lirc_sir_exit(void) @@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void) drop_hardware(); drop_chrdev(); drop_port(); + platform_device_unregister(lirc_sir_dev); + platform_driver_unregister(&lirc_sir_driver); printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n"); } diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c index 1f0949ed7ee..30a9c62a823 100644 --- a/drivers/staging/rtl8712/recv_linux.c +++ b/drivers/staging/rtl8712/recv_linux.c @@ -113,13 +113,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter, if (skb == NULL) goto _recv_indicatepkt_drop; skb->data = precv_frame->u.hdr.rx_data; -#ifdef 
NET_SKBUFF_DATA_USES_OFFSET - skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail - - precv_frame->u.hdr.rx_head); -#else - skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail; -#endif skb->len = precv_frame->u.hdr.len; + skb_set_tail_pointer(skb, skb->len); if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) skb->ip_summed = CHECKSUM_UNNECESSARY; else diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c index 6cb7e28c99a..af28a62c4e5 100644 --- a/drivers/staging/rtl8712/usb_intf.c +++ b/drivers/staging/rtl8712/usb_intf.c @@ -62,6 +62,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = { {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */ /* Belkin */ {USB_DEVICE(0x050D, 0x945A)}, + /* ISY IWL - Belkin clone */ + {USB_DEVICE(0x050D, 0x11F1)}, /* Corega */ {USB_DEVICE(0x07AA, 0x0047)}, /* D-Link */ @@ -86,6 +88,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = { {USB_DEVICE(0x0DF6, 0x0045)}, {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */ {USB_DEVICE(0x0DF6, 0x004B)}, + {USB_DEVICE(0x0DF6, 0x005B)}, {USB_DEVICE(0x0DF6, 0x005D)}, {USB_DEVICE(0x0DF6, 0x0063)}, /* Sweex */ diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 42fcf7e9cb6..59a6d4da873 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -1855,7 +1855,7 @@ static void speakup_bits(struct vc_data *vc) static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key) { - static u_char *goto_buf = "\0\0\0\0\0\0"; + static u_char goto_buf[8]; static int num; int maxlen, go_pos; char *cp; diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index a2c3dc4098b..e76a882243a 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth); static unsigned char get_index(void); static struct miscdevice synth_device; -static int initialized; +static int init_pos; static int misc_registered; static struct var_t vars[] = { @@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp) unsigned long flags; spk_lock(flags); synth_soft.alive = 0; - initialized = 0; + init_pos = 0; spk_unlock(flags); /* Make sure we let applications go before leaving */ speakup_start_ttys(); @@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count, ch = '\x18'; } else if (synth_buffer_empty()) { break; - } else if (!initialized) { - if (*init) { - ch = *init; - init++; - } else { - initialized = 1; - } + } else if (init[init_pos]) { + ch = init[init_pos++]; } else { ch = synth_buffer_getc(); } diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index c241074a4b5..7843111555e 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -342,7 +342,7 @@ int synth_init(char *synth_name) mutex_lock(&spk_mutex); /* First, check if we already have it loaded. 
*/ - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) if (strcmp(synths[i]->name, synth_name) == 0) synth = synths[i]; @@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth) int i; int status = 0; mutex_lock(&spk_mutex); - for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++) + for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++) /* synth_remove() is responsible for rotating the array down */ if (in_synth == synths[i]) { mutex_unlock(&spk_mutex); diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 433a3b6207d..1547cf23250 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -761,26 +761,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso); * buffer and iso packets need to be stored and be in propeper endian in urb * before calling this function */ -int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) +void usbip_pad_iso(struct usbip_device *ud, struct urb *urb) { int np = urb->number_of_packets; int i; - int ret; int actualoffset = urb->actual_length; if (!usb_pipeisoc(urb->pipe)) - return 0; + return; /* if no packets or length of data is 0, then nothing to unpack */ if (np == 0 || urb->actual_length == 0) - return 0; + return; /* * if actual_length is transfer_buffer_length then no padding is * present. */ if (urb->actual_length == urb->transfer_buffer_length) - return 0; + return; /* * loop over all packets from last to first (to prevent overwritting @@ -792,8 +791,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) urb->transfer_buffer + actualoffset, urb->iso_frame_desc[i].actual_length); } - - return ret; } EXPORT_SYMBOL_GPL(usbip_pad_iso); diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h index 4a641c552b7..072743ece5e 100644 --- a/drivers/staging/usbip/usbip_common.h +++ b/drivers/staging/usbip/usbip_common.h @@ -327,7 +327,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. */ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. 
*/ -int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); +void usbip_pad_iso(struct usbip_device *ud, struct urb *urb); void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); /* usbip_event.c */ diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 5c4b5d94450..c851433f5df 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, return; /* restore the padding in iso packets */ - if (usbip_pad_iso(ud, urb) < 0) - return; + usbip_pad_iso(ud, urb); if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h index a8f97ebb659..991ce3eecd8 100644 --- a/drivers/staging/vt6656/bssdb.h +++ b/drivers/staging/vt6656/bssdb.h @@ -92,7 +92,6 @@ typedef struct tagSRSNCapObject { } SRSNCapObject, *PSRSNCapObject; // BSS info(AP) -#pragma pack(1) typedef struct tagKnownBSS { // BSS info BOOL bActive; diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c index cb817ced518..5918ef7038b 100644 --- a/drivers/staging/vt6656/dpc.c +++ b/drivers/staging/vt6656/dpc.c @@ -200,7 +200,7 @@ s_vProcessRxMACHeader ( } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) { cbHeaderSize += 6; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); - if ((*pwType == cpu_to_le16(ETH_P_IPX)) || + if ((*pwType == cpu_to_be16(ETH_P_IPX)) || (*pwType == cpu_to_le16(0xF380))) { cbHeaderSize -= 8; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); @@ -1256,7 +1256,7 @@ static BOOL s_bHandleRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); } else { @@ -1367,7 +1367,7 @@ static BOOL s_bHostWepRxEncryption ( PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16); if (byDecMode == KEY_CTL_TKIP) { *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV)); diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c index 51b5adf3657..df8ea25c5c6 100644 --- a/drivers/staging/vt6656/hostap.c +++ b/drivers/staging/vt6656/hostap.c @@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - kfree(pDevice->apdev); + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = FALSE; pDevice->bEnableHostWEP = FALSE; diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h index 3176c8d08d6..c731b120d98 100644 --- a/drivers/staging/vt6656/int.h +++ b/drivers/staging/vt6656/int.h @@ -34,7 +34,6 @@ #include "device.h" /*--------------------- Export Definitions -------------------------*/ -#pragma pack(1) typedef struct tagSINTData { BYTE byTSR0; BYTE byPkt0; diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h index 22710cef751..ae6e2d237b2 100644 --- a/drivers/staging/vt6656/iocmd.h +++ b/drivers/staging/vt6656/iocmd.h @@ -95,13 
+95,12 @@ typedef enum tagWZONETYPE { // Ioctl interface structure // Command structure // -#pragma pack(1) typedef struct tagSCmdRequest { u8 name[16]; void *data; u16 wResult; u16 wCmdCode; -} SCmdRequest, *PSCmdRequest; +} __packed SCmdRequest, *PSCmdRequest; // // Scan @@ -111,7 +110,7 @@ typedef struct tagSCmdScan { u8 ssid[SSID_MAXLEN + 2]; -} SCmdScan, *PSCmdScan; +} __packed SCmdScan, *PSCmdScan; // // BSS Join @@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin { BOOL bPSEnable; BOOL bShareKeyAuth; -} SCmdBSSJoin, *PSCmdBSSJoin; +} __packed SCmdBSSJoin, *PSCmdBSSJoin; // // Zonetype Setting @@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet { BOOL bWrite; WZONETYPE ZoneType; -} SCmdZoneTypeSet, *PSCmdZoneTypeSet; +} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet; typedef struct tagSWPAResult { char ifname[100]; @@ -145,7 +144,7 @@ typedef struct tagSWPAResult { u8 key_mgmt; u8 eap_type; BOOL authenticated; -} SWPAResult, *PSWPAResult; +} __packed SWPAResult, *PSWPAResult; typedef struct tagSCmdStartAP { @@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP { BOOL bShareKeyAuth; u8 byBasicRate; -} SCmdStartAP, *PSCmdStartAP; +} __packed SCmdStartAP, *PSCmdStartAP; typedef struct tagSCmdSetWEP { @@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP { BOOL bWepKeyAvailable[WEP_NKEYS]; u32 auWepKeyLength[WEP_NKEYS]; -} SCmdSetWEP, *PSCmdSetWEP; +} __packed SCmdSetWEP, *PSCmdSetWEP; typedef struct tagSBSSIDItem { @@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem { BOOL bWEPOn; u32 uRSSI; -} SBSSIDItem; +} __packed SBSSIDItem; typedef struct tagSBSSIDList { u32 uItem; SBSSIDItem sBSSIDList[0]; -} SBSSIDList, *PSBSSIDList; +} __packed SBSSIDList, *PSBSSIDList; typedef struct tagSNodeItem { @@ -208,7 +207,7 @@ typedef struct tagSNodeItem { u32 uTxAttempts; u16 wFailureRatio; -} SNodeItem; +} __packed SNodeItem; typedef struct tagSNodeList { @@ -216,7 +215,7 @@ typedef struct tagSNodeList { u32 uItem; SNodeItem sNodeList[0]; -} SNodeList, *PSNodeList; +} __packed SNodeList, *PSNodeList; typedef struct tagSCmdLinkStatus { @@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus { u32 uChannel; u32 uLinkRate; -} SCmdLinkStatus, *PSCmdLinkStatus; +} __packed SCmdLinkStatus, *PSCmdLinkStatus; // // 802.11 counter @@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount { u32 ReceivedFragmentCount; u32 MulticastReceivedFrameCount; u32 FCSErrorCount; -} SDot11MIBCount, *PSDot11MIBCount; +} __packed SDot11MIBCount, *PSDot11MIBCount; @@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount { u32 ullTxBroadcastBytes[2]; u32 ullTxMulticastBytes[2]; u32 ullTxDirectedBytes[2]; -} SStatMIBCount, *PSStatMIBCount; +} __packed SStatMIBCount, *PSStatMIBCount; typedef struct tagSCmdValue { u32 dwValue; -} SCmdValue, *PSCmdValue; +} __packed SCmdValue, *PSCmdValue; // // hostapd & viawget ioctl related @@ -431,7 +430,7 @@ struct viawget_hostapd_param { u8 ssid[32]; } scan_req; } u; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h index 959c8868f6e..2522ddec718 100644 --- a/drivers/staging/vt6656/iowpa.h +++ b/drivers/staging/vt6656/iowpa.h @@ -67,12 +67,11 @@ enum { -#pragma pack(1) typedef struct viawget_wpa_header { u8 type; u16 req_ie_len; u16 resp_ie_len; -} viawget_wpa_header; +} __packed viawget_wpa_header; struct viawget_wpa_param { u32 cmd; @@ -113,9 +112,8 @@ struct viawget_wpa_param { u8 *buf; } scan_results; } u; -}; +} __packed; -#pragma pack(1) struct viawget_scan_result { u8 bssid[6]; u8 ssid[32]; @@ 
-130,7 +128,7 @@ struct viawget_scan_result { int noise; int level; int maxrate; -}; +} __packed; /*--------------------- Export Classes ----------------------------*/ diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c index 27bb523c8a9..fd93e837c70 100644 --- a/drivers/staging/vt6656/key.c +++ b/drivers/staging/vt6656/key.c @@ -223,7 +223,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -235,7 +235,8 @@ BOOL KeybSetKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Enter KeybSetKey: %X\n", dwKeyIndex); j = (MAX_KEY_TABLE-1); for (i=0;i<(MAX_KEY_TABLE-1);i++) { @@ -261,7 +262,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4); @@ -302,9 +305,12 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", + pKey->wTSC15_0); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -326,7 +332,9 @@ BOOL KeybSetKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(N)[%X]: %d\n", + pTable->KeyTable[j].dwGTKeyIndex, j); } pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4); @@ -367,9 +375,11 @@ BOOL KeybSetKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ", + pKey->dwKeyIndex); return (TRUE); } @@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType, DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n", + pTable->KeyTable[i].dwGTKeyIndex); return (TRUE); } @@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -693,7 
+704,10 @@ BOOL KeybSetDefaultKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, + MAX_KEY_TABLE-1); } pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed @@ -744,9 +758,11 @@ BOOL KeybSetDefaultKey( } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n"); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n", + pKey->dwTSC47_16); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n", + pKey->dwKeyIndex); return (TRUE); } @@ -772,7 +788,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -784,7 +800,8 @@ BOOL KeybSetAllGroupKey( PSKeyItem pKey; unsigned int uKeyIdx; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n", + dwKeyIndex); if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key @@ -801,7 +818,9 @@ BOOL KeybSetAllGroupKey( if ((dwKeyIndex & TRANSMIT_KEY) != 0) { // Group transmit key pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex; - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "Group transmit key(R)[%X]: %d\n", + pTable->KeyTable[i].dwGTKeyIndex, i); } pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h index f749c7a027d..bd35d39621a 100644 --- a/drivers/staging/vt6656/key.h +++ b/drivers/staging/vt6656/key.h @@ -58,7 +58,7 @@ typedef struct tagSKeyItem { BOOL bKeyValid; - unsigned long uKeyLength; + u32 uKeyLength; BYTE abyKey[MAX_KEY_LEN]; QWORD KeyRSC; DWORD dwTSC47_16; @@ -107,7 +107,7 @@ BOOL KeybSetKey( PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode @@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey( void *pDeviceHandler, PSKeyManagement pTable, DWORD dwKeyIndex, - unsigned long uKeyLength, + u32 uKeyLength, PQWORD pKeyRSC, PBYTE pbyKey, BYTE byKeyDecMode diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c index 26c19d1408c..0636d824825 100644 --- a/drivers/staging/vt6656/mac.c +++ b/drivers/staging/vt6656/mac.c @@ -262,7 +262,8 @@ BYTE pbyData[24]; dwData1 <<= 16; dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5)); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. 
wOffset: %d, Data: %X,"\ + " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); @@ -279,7 +280,8 @@ BYTE pbyData[24]; dwData2 <<= 8; dwData2 |= *(pbyAddr+0); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n", + wOffset, dwData2); //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset); //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index e18efd43e3e..5fb56a65482 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode"); // Static vars definitions // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; @@ -725,8 +725,6 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message) if (device->flags & DEVICE_FLAGS_OPENED) device_close(device->dev); - usb_put_dev(interface_to_usbdev(intf)); - return 0; } @@ -737,8 +735,6 @@ static int vt6656_resume(struct usb_interface *intf) if (!device || !device->dev) return -ENODEV; - usb_get_dev(interface_to_usbdev(intf)); - if (!(device->flags & DEVICE_FLAGS_OPENED)) device_open(device->dev); diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c index 3fd0478a9a5..8cf0881888b 100644 --- a/drivers/staging/vt6656/rf.c +++ b/drivers/staging/vt6656/rf.c @@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr; return TRUE; } + if (uCH == 0) + return -EINVAL; + switch (uRATE) { case RATE_1M: case RATE_2M: diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index 9b64b102f55..3beb126e90f 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -377,7 +377,8 @@ s_vFillTxKey ( *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV // Append IV&ExtIV after Mac Header *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n", + *pdwExtIV); } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) { pTransmitKey->wTSC15_0++; @@ -1701,7 +1702,7 @@ s_bPacketToWirelessUsb( // 802.1H if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) { if (pDevice->dwDiagRefCount == 0) { - if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) || + if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) || (psEthHeader->wType == cpu_to_le16(0xF380))) { memcpy((PBYTE) (pbyPayloadHead), abySNAP_Bridgetunnel, 6); @@ -1753,7 +1754,8 @@ s_bPacketToWirelessUsb( MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n", + dwMICKey0, dwMICKey1); /////////////////////////////////////////////////////////////////// @@ -2635,7 +2637,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12); dwMIC_Priority = 0; MIC_vAppend((PBYTE)&dwMIC_Priority, 4); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\ + " %X, %X\n", 
dwMICKey0, dwMICKey1); uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen; @@ -2655,7 +2658,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen); - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n", + *pdwMIC_L, *pdwMIC_R); } @@ -2840,10 +2844,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) Packet_Type = skb->data[ETH_HLEN+1]; Descriptor_type = skb->data[ETH_HLEN+1+1+2]; Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]); - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - /* 802.1x OR eapol-key challenge frame transfer */ - if (((Protocol_Version == 1) || (Protocol_Version == 2)) && - (Packet_Type == 3)) { + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + /* 802.1x OR eapol-key challenge frame transfer */ + if (((Protocol_Version == 1) || (Protocol_Version == 2)) && + (Packet_Type == 3)) { bTxeapol_key = TRUE; if(!(Key_info & BIT3) && //WPA or RSN group-key challenge (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key @@ -2989,19 +2993,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) } } - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - if (pDevice->byBBType != BB_TYPE_11A) { - pDevice->wCurrentRate = RATE_1M; - pDevice->byACKRate = RATE_1M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } else { - pDevice->wCurrentRate = RATE_6M; - pDevice->byACKRate = RATE_6M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } - } + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + if (pDevice->byBBType != BB_TYPE_11A) { + pDevice->wCurrentRate = RATE_1M; + pDevice->byACKRate = RATE_1M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } else { + pDevice->wCurrentRate = RATE_6M; + pDevice->byACKRate = RATE_6M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } + } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n", @@ -3017,7 +3021,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) if (bNeedEncryption == TRUE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType)); - if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) { + if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) { bNeedEncryption = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType)); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { @@ -3029,7 +3033,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n"); } else { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } @@ -3043,7 +3048,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) if (pDevice->bEnableHostWEP) { if ((uNodeIndex != 0) && (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", 
pTransmitKey->dwKeyIndex); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n", + pTransmitKey->dwKeyIndex); bNeedEncryption = TRUE; } } diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h index 8e9450ef399..dfbf74713a8 100644 --- a/drivers/staging/vt6656/ttype.h +++ b/drivers/staging/vt6656/ttype.h @@ -29,6 +29,8 @@ #ifndef __TTYPE_H__ #define __TTYPE_H__ +#include + /******* Common definitions and typedefs ***********************************/ typedef int BOOL; @@ -42,17 +44,17 @@ typedef int BOOL; /****** Simple typedefs ***************************************************/ -typedef unsigned char BYTE; // 8-bit -typedef unsigned short WORD; // 16-bit -typedef unsigned long DWORD; // 32-bit +typedef u8 BYTE; +typedef u16 WORD; +typedef u32 DWORD; // QWORD is for those situation that we want // an 8-byte-aligned 8 byte long structure // which is NOT really a floating point number. typedef union tagUQuadWord { struct { - DWORD dwLowDword; - DWORD dwHighDword; + u32 dwLowDword; + u32 dwHighDword; } u; double DoNotUseThisField; } UQuadWord; @@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit /****** Common pointer types ***********************************************/ -typedef unsigned long ULONG_PTR; // 32-bit -typedef unsigned long DWORD_PTR; // 32-bit +typedef u32 ULONG_PTR; +typedef u32 DWORD_PTR; // boolean pointer diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c index c612ab58f38..f759352a268 100644 --- a/drivers/staging/vt6656/usbpipe.c +++ b/drivers/staging/vt6656/usbpipe.c @@ -168,6 +168,11 @@ int PIPEnsControlOut( if (pDevice->Flags & fMP_CONTROL_WRITES) return STATUS_FAILURE; + if (pDevice->Flags & fMP_CONTROL_READS) + return STATUS_FAILURE; + + MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); + pDevice->sUsbCtlRequest.bRequestType = 0x40; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -182,12 +187,13 @@ int PIPEnsControlOut( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus); + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "control send request submission failed: %d\n", + ntStatus); + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); return STATUS_FAILURE; } - else { - MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); - } + spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { @@ -227,6 +233,11 @@ int PIPEnsControlIn( if (pDevice->Flags & fMP_CONTROL_READS) return STATUS_FAILURE; + if (pDevice->Flags & fMP_CONTROL_WRITES) + return STATUS_FAILURE; + + MP_SET_FLAG(pDevice, fMP_CONTROL_READS); + pDevice->sUsbCtlRequest.bRequestType = 0xC0; pDevice->sUsbCtlRequest.bRequest = byRequest; pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); @@ -240,10 +251,11 @@ int PIPEnsControlIn( ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); if (ntStatus != 0) { - DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus); - }else { - MP_SET_FLAG(pDevice, fMP_CONTROL_READS); - } + DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO + "control request submission failed: %d\n", ntStatus); + MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS); + return STATUS_FAILURE; + } spin_unlock_irq(&pDevice->lock); for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c index 78ea121b7e2..31fb96a54cf 100644 --- a/drivers/staging/vt6656/wcmd.c +++ b/drivers/staging/vt6656/wcmd.c @@ -316,17 +316,19 @@ 
s_MgrMakeProbeRequest( return pTxPacket; } -void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond) +void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond) { - PSDevice pDevice = (PSDevice)hDeviceContext; + PSDevice pDevice = (PSDevice)hDeviceContext; - init_timer(&pDevice->sTimerCommand); - pDevice->sTimerCommand.data = (unsigned long)pDevice; - pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; - // RUN_AT :1 msec ~= (HZ/1024) - pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10); - add_timer(&pDevice->sTimerCommand); - return; + init_timer(&pDevice->sTimerCommand); + + pDevice->sTimerCommand.data = (unsigned long)pDevice; + pDevice->sTimerCommand.function = (TimerFunction)vRunCommand; + pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000); + + add_timer(&pDevice->sTimerCommand); + + return; } void vRunCommand(void *hDeviceContext) diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h index 46c295905b4..c359252a6b0 100644 --- a/drivers/staging/vt6656/wpa2.h +++ b/drivers/staging/vt6656/wpa2.h @@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo { } PMKIDInfo, *PPMKIDInfo; typedef struct tagSPMKIDCache { - unsigned long BSSIDInfoCount; - PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; + u32 BSSIDInfoCount; + PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE]; } SPMKIDCache, *PSPMKIDCache; diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index 3724e1e67ec..02f9eb83680 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -24,7 +24,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4b5421b391d..f7c3cfbbaba 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -905,6 +905,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) sc->result = SAM_STAT_GOOD; set_host_byte(sc, DID_OK); + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || + (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) + scsi_set_resid(sc, se_cmd->residual_count); sc->scsi_done(sc); return 0; } @@ -930,6 +933,9 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) sc->result = se_cmd->scsi_status; set_host_byte(sc, DID_OK); + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || + (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) + scsi_set_resid(sc, se_cmd->residual_count); sc->scsi_done(sc); return 0; } diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 86b36600acb..527bda488df 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -238,8 +239,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * changed. */ if (primary) { - tg_pt_id = ((ptr[2] << 8) & 0xff); - tg_pt_id |= (ptr[3] & 0xff); + tg_pt_id = get_unaligned_be16(ptr + 2); /* * Locate the matching target port group ID from * the global tg_pt_gp list @@ -280,8 +280,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * the Target Port in question for the the incoming * SET_TARGET_PORT_GROUPS op. 
*/ - rtpi = ((ptr[2] << 8) & 0xff); - rtpi |= (ptr[3] & 0xff); + rtpi = get_unaligned_be16(ptr + 2); /* * Locate the matching relative target port identifer * for the struct se_device storage object. @@ -352,6 +351,7 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + return 0; case MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: @@ -394,6 +394,7 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: @@ -434,6 +435,7 @@ static inline int core_alua_state_transition( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 7f19c8b7b84..05584010e70 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -83,6 +83,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd) buf[1] = 0x80; buf[2] = dev->transport->get_device_rev(dev); + /* + * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2 + * + * SPC4 says: + * A RESPONSE DATA FORMAT field set to 2h indicates that the + * standard INQUIRY data is in the format defined in this + * standard. Response data format values less than 2h are + * obsolete. Response data format values greater than 2h are + * reserved. + */ + buf[3] = 2; + /* * Enable SCCS and TPGS fields for Emulated ALUA */ @@ -94,7 +106,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) return 0; } - buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ + buf[7] = 0x2; /* CmdQue=1 */ /* * Do not include vendor, product, reversion info in INQUIRY diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 25c1f49a7d8..26f4d5b2cbe 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -3234,7 +3234,8 @@ static int __init target_core_init_configfs(void) if (ret < 0) goto out; - if (core_dev_setup_virtual_lun0() < 0) + ret = core_dev_setup_virtual_lun0(); + if (ret < 0) goto out; return 0; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 07ab5a3bb8e..6246f28eac4 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -355,6 +355,14 @@ static struct config_group *target_fabric_make_mappedlun( ret = -EINVAL; goto out; } + if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { + pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" + "-1: %u for Target Portal Group: %u\n", mapped_lun, + TRANSPORT_MAX_LUNS_PER_TPG-1, + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); + ret = -EINVAL; + goto out; + } lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, config_item_name(acl_ci), &ret); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b662db3a320..98e12d31c9c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -471,6 +471,7 @@ static int core_scsi3_pr_seq_non_holder( case READ_MEDIA_SERIAL_NUMBER: case REPORT_LUNS: case REQUEST_SENSE: + case PERSISTENT_RESERVE_IN: ret = 0; /*/ Allowed CDBs */ break; default: @@ -3079,7 +3080,7 @@ static int core_scsi3_pro_preempt( if (!(calling_it_nexus)) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, - ASCQ_2AH_RESERVATIONS_PREEMPTED); + ASCQ_2AH_REGISTRATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); /* 
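
[Editorial aside, not part of the patch] The two target_core_alua.c hunks above swap open-coded big-endian parsing for the kernel helper get_unaligned_be16(). The removed expression ((ptr[2] << 8) & 0xff) | (ptr[3] & 0xff) applies the 0xff mask after the shift, so the high byte of the 16-bit target port group / relative target port identifier is always zeroed and only the low byte survives; the helper reads both bytes and also tolerates unaligned parameter-list buffers on strict-alignment architectures. Below is a minimal userspace C sketch of the before/after behaviour; the function names, the sample buffer, the p[0]/p[1] offsets and the use of stdio are invented for illustration and do not appear in the patch.

/* Illustration only: why the masked-shift form loses the high byte. */
#include <stdio.h>
#include <stdint.h>

static uint16_t be16_old(const uint8_t *p)	/* mirrors the removed code */
{
	return ((p[0] << 8) & 0xff) | (p[1] & 0xff);	/* high byte masked away */
}

static uint16_t be16_new(const uint8_t *p)	/* what get_unaligned_be16() computes */
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	const uint8_t parm[] = { 0x01, 0x02 };	/* 16-bit ID 0x0102, big endian */

	printf("old: 0x%04x\n", (unsigned)be16_old(parm));	/* prints 0x0002 */
	printf("new: 0x%04x\n", (unsigned)be16_new(parm));	/* prints 0x0102 */
	return 0;
}
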
@@ -3191,7 +3192,7 @@ static int core_scsi3_pro_preempt( * additional sense code set to REGISTRATIONS PREEMPTED; */ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, - ASCQ_2AH_RESERVATIONS_PREEMPTED); + ASCQ_2AH_REGISTRATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); /* diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 331d423fd0e..af0c500ff15 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1210,7 +1210,6 @@ static int __pscsi_map_task_SG( bio = NULL; } - page++; len -= bytes; data_len -= bytes; off = 0; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 1340ffd7648..910c8b0cd73 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3672,15 +3672,20 @@ static int transport_generic_cmd_sequencer( /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } - + /* + * For the overflow case keep the existing fabric provided + * ->data_length. Otherwise for the underflow case, reset + * ->data_length to the smaller SCSI expected data transfer + * length. + */ if (size > cmd->data_length) { cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; cmd->residual_count = (size - cmd->data_length); } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - size); + cmd->data_length = size; } - cmd->data_length = size; } transport_set_supported_SAM_opcode(cmd); @@ -5668,6 +5673,8 @@ int transport_send_check_condition_and_sense( case TCM_SECTOR_COUNT_TOO_MANY: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID COMMAND OPERATION CODE */ @@ -5676,6 +5683,7 @@ int transport_send_check_condition_and_sense( case TCM_UNKNOWN_MODE_PAGE: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN CDB */ @@ -5684,6 +5692,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_ABORT_CMD: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* BUS DEVICE RESET FUNCTION OCCURRED */ @@ -5693,6 +5702,7 @@ int transport_send_check_condition_and_sense( case TCM_INCORRECT_AMOUNT_OF_DATA: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* WRITE ERROR */ @@ -5703,22 +5713,25 @@ int transport_send_check_condition_and_sense( case TCM_INVALID_CDB_FIELD: /* CURRENT ERROR */ buffer[offset] = 0x70; - /* ABORTED COMMAND */ - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN CDB */ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24; break; case TCM_INVALID_PARAMETER_LIST: /* CURRENT ERROR */ buffer[offset] = 0x70; - /* ABORTED COMMAND */ - buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* INVALID FIELD IN PARAMETER LIST */ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26; 
break; case TCM_UNEXPECTED_UNSOLICITED_DATA: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* WRITE ERROR */ @@ -5729,6 +5742,7 @@ int transport_send_check_condition_and_sense( case TCM_SERVICE_CRC_ERROR: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* PROTOCOL SERVICE CRC ERROR */ @@ -5739,6 +5753,7 @@ int transport_send_check_condition_and_sense( case TCM_SNACK_REJECTED: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ABORTED COMMAND */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; /* READ ERROR */ @@ -5749,6 +5764,7 @@ int transport_send_check_condition_and_sense( case TCM_WRITE_PROTECTED: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* DATA PROTECT */ buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; /* WRITE PROTECTED */ @@ -5757,6 +5773,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_UNIT_ATTENTION: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* UNIT ATTENTION */ buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); @@ -5766,6 +5783,7 @@ int transport_send_check_condition_and_sense( case TCM_CHECK_CONDITION_NOT_READY: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* Not Ready */ buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY; transport_get_sense_codes(cmd, &asc, &ascq); @@ -5776,6 +5794,7 @@ int transport_send_check_condition_and_sense( default: /* CURRENT ERROR */ buffer[offset] = 0x70; + buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10; /* ILLEGAL REQUEST */ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; /* LOGICAL UNIT COMMUNICATION FAILURE */ diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index b2a106729d4..3c3fa84e9ef 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -371,10 +371,12 @@ static void ft_send_resp_status(struct fc_lport *lport, fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); sp = fr_seq(fp); - if (sp) + if (sp) { lport->tt.seq_send(lport, sp, fp); - else + lport->tt.exch_done(sp); + } else { lport->tt.frame_send(lport, fp); + } } /* diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 7491e21cc6a..9a084b8d197 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -64,7 +64,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport) struct ft_tport *tport; int i; - tport = rcu_dereference(lport->prov[FC_TYPE_FCP]); + tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP], + lockdep_is_held(&ft_lport_lock)); if (tport && tport->tpg) return tport; @@ -392,11 +393,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, tport = ft_tport_create(rdata->local_port); if (!tport) - return 0; /* not a target for this local port */ + goto not_target; /* not a target for this local port */ acl = ft_acl_get(tport->tpg, rdata); if (!acl) - return 0; + goto not_target; /* no target for this remote */ if (!rspp) goto fill; @@ -433,12 +434,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len, /* * OR in our service parameters with other provider (initiator), if any. 
- * TBD XXX - indicate RETRY capability? */ fill: fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_RETRY; spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN); return FC_SPP_RESP_ACK; + +not_target: + fcp_parm = ntohl(spp->spp_params); + fcp_parm &= ~FCP_SPPF_TARG_FCN; + spp->spp_params = htonl(fcp_parm); + return 0; } /** @@ -467,7 +474,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu) { struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu); - transport_deregister_session(sess->se_sess); kfree(sess); } @@ -475,6 +481,7 @@ static void ft_sess_free(struct kref *kref) { struct ft_sess *sess = container_of(kref, struct ft_sess, kref); + transport_deregister_session(sess->se_sess); call_rcu(&sess->rcu, ft_sess_rcu_free); } diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index d5f923bcdff..e1abb45c3cf 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j) ixj_fsk_alloc(j); - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); @@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j) ixj_pre_cid(j); } j->flags.cidcw_ack = 0; - strcpy(sdmf1, j->cid_send.month); - strcat(sdmf1, j->cid_send.day); - strcat(sdmf1, j->cid_send.hour); - strcat(sdmf1, j->cid_send.min); - strcpy(sdmf2, j->cid_send.number); - strcpy(sdmf3, j->cid_send.name); + strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1)); + strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1)); + strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2)); + strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3)); len1 = strlen(sdmf1); len2 = strlen(sdmf2); diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index ca71b6de648..5018d182a9a 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -1462,6 +1462,7 @@ static int __init thermal_init(void) idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); + return result; } result = genetlink_init(); return result; diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 220579592c2..34111486baa 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1113,8 +1113,10 @@ static int set_serial_info(struct async_struct * info, (new_serial.close_delay != state->close_delay) || (new_serial.xmit_fifo_size != state->xmit_fifo_size) || ((new_serial.flags & ~ASYNC_USR_MASK) != - (state->flags & ~ASYNC_USR_MASK))) + (state->flags & ~ASYNC_USR_MASK))) { + tty_unlock(); return -EPERM; + } state->flags = ((state->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); info->flags = ((info->flags & ~ASYNC_USR_MASK) | diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index ba679ce0a77..8f82f7ab354 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -1330,7 +1330,7 @@ static void moxa_start(struct tty_struct *tty) if (ch == NULL) return; - if (!(ch->statusflags & TXSTOPPED)) + if 
(!test_bit(TXSTOPPED, &ch->statusflags)) return; MoxaPortTxEnable(ch); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index c0d34addc2d..fee6bedee8a 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -842,7 +842,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { - dlci->skb = skb_dequeue(&dlci->skb_list); + dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) return 0; first = 1; @@ -866,8 +866,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, /* FIXME: need a timer or something to kick this so it can't get stuck with no work outstanding and no buffer free */ - if (msg == NULL) + if (msg == NULL) { + skb_queue_tail(&dlci->skb_list, dlci->skb); + dlci->skb = NULL; return -ENOMEM; + } dp = msg->data; if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ @@ -1152,6 +1155,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { u8 buf[1]; + unsigned long flags; + switch (command) { case CMD_CLD: { struct gsm_dlci *dlci = gsm->dlci[0]; @@ -1177,7 +1182,9 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, gsm->constipated = 0; gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); /* Kick the link in case it is idling */ + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm); + spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_MSC: /* Out of band modem line change indicator for a DLCI */ @@ -2269,12 +2276,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty) /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm); if (gsm->tx_bytes < TX_THRESH_LO) { - spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_data_sweep(gsm); - spin_unlock_irqrestore(&gsm->tx_lock, flags); } + spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c3954fbf6ac..687cfe3b9b9 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1531,6 +1531,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) tty->real_raw = 0; } n_tty_set_room(tty); + /* + * Fix tty hang when I_IXON(tty) is cleared, but the tty + * been stopped by STOP_CHAR(tty) before it. 
+ */ + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { + start_tty(tty); + } + /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); @@ -1728,7 +1736,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, do_it_again: - BUG_ON(!tty->read_buf); + if (WARN_ON(!tty->read_buf)) + return -EAGAIN; c = job_control(tty, file); if (c < 0) diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index e18604b3fc7..47359285922 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -49,7 +49,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp) tty->packet = 0; if (!tty->link) return; - tty->link->packet = 0; set_bit(TTY_OTHER_CLOSED, &tty->link->flags); wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); @@ -670,6 +669,9 @@ static int ptmx_open(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); + /* We refuse fsnotify events on ptmx, since it's a shared resource */ + filp->f_mode |= FMODE_NONOTIFY; + retval = tty_alloc_file(filp); if (retval) return retval; diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c index 762ce722758..7f50999eebc 100644 --- a/drivers/tty/serial/8250.c +++ b/drivers/tty/serial/8250.c @@ -81,7 +81,7 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ #define DEBUG_INTR(fmt...) do { } while (0) #endif -#define PASS_LIMIT 256 +#define PASS_LIMIT 512 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c index ff48fdb5c0b..3411ed35f1d 100644 --- a/drivers/tty/serial/8250_pci.c +++ b/drivers/tty/serial/8250_pci.c @@ -1011,6 +1011,8 @@ static int pci_eg20t_init(struct pci_dev *dev) #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208 #define PCI_SUBDEVICE_ID_POCTAL232 0x0308 #define PCI_SUBDEVICE_ID_POCTAL422 0x0408 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530 #define PCI_VENDOR_ID_ADVANTECH 0x13fe #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66 #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620 @@ -1459,51 +1461,61 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8811, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8812, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8813, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8814, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x8027, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x8028, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x8029, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x800C, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x800D, .init = pci_eg20t_init, + .setup = pci_default_setup, }, { .vendor = 0x10DB, .device = 0x800D, .init = pci_eg20t_init, + .setup = pci_default_setup, }, /* * Cronyx Omega PCI (PLX-chip based) @@ -2999,8 +3011,11 @@ static struct pci_device_id serial_pci_tbl[] = { * For now just used the hex ID 0x950a. 
*/ { PCI_VENDOR_ID_OXSEMI, 0x950a, - PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0, - pbn_b0_2_115200 }, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00, + 0, 0, pbn_b0_2_115200 }, + { PCI_VENDOR_ID_OXSEMI, 0x950a, + PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30, + 0, 0, pbn_b0_2_115200 }, { PCI_VENDOR_ID_OXSEMI, 0x950a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_2_1130000 }, diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 33f840cb6e5..ff650d6613c 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -458,7 +458,7 @@ config SERIAL_SAMSUNG_UARTS int depends on ARM && PLAT_SAMSUNG default 2 if ARCH_S3C2400 - default 6 if ARCH_S5P6450 + default 6 if CPU_S5P6450 default 4 if SERIAL_SAMSUNG_UARTS_4 default 3 help diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 50bc5a5ac65..37db1d5898e 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c @@ -555,7 +555,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev) res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res_mem) port->mapbase = res_mem->start; - else if (platp->mapbase) + else if (platp) port->mapbase = platp->mapbase; else return -EINVAL; @@ -563,7 +563,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev) res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res_irq) port->irq = res_irq->start; - else if (platp->irq) + else if (platp) port->irq = platp->irq; /* Check platform data first so we can override device node data */ diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index f5f6831b0a6..7cbb3679b80 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1376,6 +1376,10 @@ static int pl011_startup(struct uart_port *port) uap->port.uartclk = clk_get_rate(uap->clk); + /* Clear pending error and receive interrupts */ + writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS | + UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR); + /* * Allocate the IRQ */ @@ -1410,10 +1414,6 @@ static int pl011_startup(struct uart_port *port) cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; writew(cr, uap->port.membase + UART011_CR); - /* Clear pending error interrupts */ - writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, - uap->port.membase + UART011_ICR); - /* * initialise the old status of the modem signals */ @@ -1428,6 +1428,9 @@ static int pl011_startup(struct uart_port *port) * as well. */ spin_lock_irq(&uap->port.lock); + /* Clear out any spuriously appearing RX interrupts */ + writew(UART011_RTIS | UART011_RXIS, + uap->port.membase + UART011_ICR); uap->im = UART011_RTIM; if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; @@ -1617,13 +1620,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, old_cr &= ~ST_UART011_CR_OVSFACT; } + /* + * Workaround for the ST Micro oversampling variants to + * increase the bitrate slightly, by lowering the divisor, + * to avoid delayed sampling of start bit at high speeds, + * else we see data corruption. 
+ */ + if (uap->vendor->oversampling) { + if ((baud >= 3000000) && (baud < 3250000) && (quot > 1)) + quot -= 1; + else if ((baud > 3250000) && (quot > 2)) + quot -= 2; + } /* Set baud rate */ writew(quot & 0x3f, port->membase + UART011_FBRD); writew(quot >> 6, port->membase + UART011_IBRD); /* * ----------v----------v----------v----------v----- - * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L + * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER + * UART011_FBRD & UART011_IBRD. * ----------^----------^----------^----------^----- */ writew(lcr_h, port->membase + uap->lcrh_rx); @@ -1733,9 +1749,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int status, old_cr, new_cr; + unsigned long flags; + int locked = 1; clk_enable(uap->clk); + local_irq_save(flags); + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock(&uap->port.lock); + else + spin_lock(&uap->port.lock); + /* * First save the CR then disable the interrupts */ @@ -1755,6 +1781,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) } while (status & UART01x_FR_BUSY); writew(old_cr, uap->port.membase + UART011_CR); + if (locked) + spin_unlock(&uap->port.lock); + local_irq_restore(flags); + clk_disable(uap->clk); } @@ -1906,6 +1936,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) uap->port.line = i; pl011_dma_probe(uap); + /* Ensure interrupts from this UART are masked and cleared */ + writew(0, uap->port.membase + UART011_IMSC); + writew(0xffff, uap->port.membase + UART011_ICR); + snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); amba_ports[i] = uap; diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index af9b7814965..b989495c763 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -199,8 +199,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int mode; + unsigned long flags; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); /* Disable interrupts */ UART_PUT_IDR(port, atmel_port->tx_done_mask); @@ -231,7 +232,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); - spin_unlock(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); } diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index e6c3dbd781d..836fe273123 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id) port->x_char = 0; return IRQ_HANDLED; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - clps711xuart_stop_tx(port); - return IRQ_HANDLED; - } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + goto disable_tx_irq; count = port->fifosize >> 1; do { @@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) - clps711xuart_stop_tx(port); + if (uart_circ_empty(xmit)) { + disable_tx_irq: + disable_irq_nosync(TX_IRQ(port)); + tx_enabled(port) = 0; + } return IRQ_HANDLED; } diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 5315525220f..5d0d4f61cfb 100644 --- 
a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c @@ -551,6 +551,7 @@ static void ifx_port_shutdown(struct tty_port *port) container_of(port, struct ifx_spi_device, tty_port); mrdy_set_low(ifx_dev); + del_timer(&ifx_dev->spi_timer); clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags); tasklet_kill(&ifx_dev->io_work_tasklet); } diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c index 2aaafa9c58a..6c12d94e6d3 100644 --- a/drivers/tty/serial/jsm/jsm_driver.c +++ b/drivers/tty/serial/jsm/jsm_driver.c @@ -269,6 +269,7 @@ static void jsm_io_resume(struct pci_dev *pdev) struct jsm_board *brd = pci_get_drvdata(pdev); pci_restore_state(pdev); + pci_save_state(pdev); jsm_uart_port_init(brd); } diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 7e02c9c344f..5b3d063a4aa 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -368,6 +368,8 @@ static void mxs_auart_settermios(struct uart_port *u, writel(ctrl, u->membase + AUART_LINECTRL); writel(ctrl2, u->membase + AUART_CTRL2); + + uart_update_timeout(u, termios->c_cflag, baud); } static irqreturn_t mxs_auart_irq_handle(int irq, void *context) diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 902588b2a12..73038baa8b6 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -602,7 +602,8 @@ static void pch_request_dma(struct uart_port *port) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(0xa, 0)); /* Get DMA's dev + dma_dev = pci_get_bus_and_slot(priv->pdev->bus->number, + PCI_DEVFN(0xa, 0)); /* Get DMA's dev information */ /* Set Tx DMA */ param = &priv->param_tx; @@ -657,7 +658,8 @@ static void pch_dma_rx_complete(void *arg) tty_flip_buffer_push(tty); tty_kref_put(tty); async_tx_ack(priv->desc_rx); - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); } static void pch_dma_tx_complete(void *arg) @@ -712,7 +714,8 @@ static int handle_rx_to(struct eg20t_port *priv) int rx_size; int ret; if (!priv->start_rx) { - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); return 0; } buf = &priv->rxbuf; @@ -974,11 +977,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) case PCH_UART_IID_RDR: /* Received Data Ready */ if (priv->use_dma) { pch_uart_hal_disable_interrupt(priv, - PCH_UART_HAL_RX_INT); + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); ret = dma_handle_rx(priv); if (!ret) pch_uart_hal_enable_interrupt(priv, - PCH_UART_HAL_RX_INT); + PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); } else { ret = handle_rx(priv); } @@ -1104,7 +1109,8 @@ static void pch_uart_stop_rx(struct uart_port *port) struct eg20t_port *priv; priv = container_of(port, struct eg20t_port, port); priv->start_rx = 0; - pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT); + pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); priv->int_dis_flag = 1; } @@ -1160,6 +1166,7 @@ static int pch_uart_startup(struct uart_port *port) break; case 16: fifo_size = PCH_UART_HAL_FIFO16; + break; case 1: default: fifo_size = PCH_UART_HAL_FIFO_DIS; @@ -1197,7 +1204,8 @@ static int pch_uart_startup(struct uart_port *port) pch_request_dma(port); priv->start_rx = 1; - pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT); + 
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT | + PCH_UART_HAL_RX_ERR_INT); uart_update_timeout(port, CS8, default_baud); return 0; @@ -1255,7 +1263,7 @@ static void pch_uart_set_termios(struct uart_port *port, stb = PCH_UART_HAL_STB1; if (termios->c_cflag & PARENB) { - if (!(termios->c_cflag & PARODD)) + if (termios->c_cflag & PARODD) parity = PCH_UART_HAL_PARITY_ODD; else parity = PCH_UART_HAL_PARITY_EVEN; @@ -1353,9 +1361,11 @@ static int pch_uart_verify_port(struct uart_port *port, __func__); return -EOPNOTSUPP; #endif - priv->use_dma = 1; priv->use_dma_flag = 1; dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n"); + if (!priv->use_dma) + pch_request_dma(port); + priv->use_dma = 1; } return 0; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5cadda5bc3c..768da03f4c1 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1920,6 +1920,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) mutex_unlock(&port->mutex); return 0; } + put_device(tty_dev); + if (console_suspend_enabled || !uart_console(uport)) uport->suspended = 1; @@ -1985,9 +1987,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) disable_irq_wake(uport->irq); uport->irq_wake = 0; } + put_device(tty_dev); mutex_unlock(&port->mutex); return 0; } + put_device(tty_dev); uport->suspended = 0; /* @@ -2332,6 +2336,7 @@ void uart_unregister_driver(struct uart_driver *drv) tty_unregister_driver(p); put_tty_driver(p); kfree(drv->state); + drv->state = NULL; drv->tty_driver = NULL; } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ebd8629c108..bead17e5634 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -953,17 +953,20 @@ static void sci_dma_tx_complete(void *arg) port->icount.tx += sg_dma_len(&s->sg_tx); async_tx_ack(s->desc_tx); - s->cookie_tx = -EINVAL; s->desc_tx = NULL; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (!uart_circ_empty(xmit)) { + s->cookie_tx = 0; schedule_work(&s->work_tx); - } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { - u16 ctrl = sci_in(port, SCSCR); - sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); + } else { + s->cookie_tx = -EINVAL; + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { + u16 ctrl = sci_in(port, SCSCR); + sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); + } } spin_unlock_irqrestore(&port->lock, flags); @@ -1225,8 +1228,10 @@ static void sci_start_tx(struct uart_port *port) } if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && - s->cookie_tx < 0) + s->cookie_tx < 0) { + s->cookie_tx = 0; schedule_work(&s->work_tx); + } #endif if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index 92aa54550e8..bf583fa0c84 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = { #define UART_NR 4 static struct uart_sunsu_port sunsu_ports[UART_NR]; +static int nr_inst; /* Number of already registered ports */ #ifdef CONFIG_SERIO @@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options) printk("Console: ttyS%d (SU)\n", (sunsu_reg.minor - 64) + co->index); - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. 
- */ - if (co->index >= UART_NR) - co->index = 0; + if (co->index > nr_inst) + return -ENODEV; port = &sunsu_ports[co->index].port; /* @@ -1408,7 +1404,6 @@ static enum su_type __devinit su_get_type(struct device_node *dp) static int __devinit su_probe(struct platform_device *op) { - static int inst; struct device_node *dp = op->dev.of_node; struct uart_sunsu_port *up; struct resource *rp; @@ -1418,16 +1413,16 @@ static int __devinit su_probe(struct platform_device *op) type = su_get_type(dp); if (type == SU_PORT_PORT) { - if (inst >= UART_NR) + if (nr_inst >= UART_NR) return -EINVAL; - up = &sunsu_ports[inst]; + up = &sunsu_ports[nr_inst]; } else { up = kzalloc(sizeof(*up), GFP_KERNEL); if (!up) return -ENOMEM; } - up->port.line = inst; + up->port.line = nr_inst; spin_lock_init(&up->port.lock); @@ -1461,6 +1456,8 @@ static int __devinit su_probe(struct platform_device *op) } dev_set_drvdata(&op->dev, up); + nr_inst++; + return 0; } @@ -1488,7 +1485,7 @@ static int __devinit su_probe(struct platform_device *op) dev_set_drvdata(&op->dev, up); - inst++; + nr_inst++; return 0; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 2fcf34ccb8e..699762ffe9c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -943,6 +943,14 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); +/* We limit tty time update visibility to every 8 seconds or so. */ +static void tty_update_time(struct timespec *time) +{ + unsigned long sec = get_seconds() & ~7; + if ((long)(sec - time->tv_sec) > 0) + time->tv_sec = sec; +} + /** * tty_read - read method for tty device files * @file: pointer to tty file @@ -979,8 +987,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, else i = -EIO; tty_ldisc_deref(ld); + if (i > 0) - inode->i_atime = current_fs_time(inode->i_sb); + tty_update_time(&inode->i_atime); + return i; } @@ -1083,7 +1093,7 @@ static inline ssize_t do_tty_write( } if (written) { struct inode *inode = file->f_path.dentry->d_inode; - inode->i_mtime = current_fs_time(inode->i_sb); + tty_update_time(&inode->i_mtime); ret = written; } out: diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 53f2442c609..30478736cc7 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) if (opt & TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -EINTR; + return -ERESTARTSYS; } tty_set_termios(tty, &tmp_termios); @@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt) if (opt & TERMIOS_WAIT) { tty_wait_until_sent(tty, 0); if (signal_pending(current)) - return -EINTR; + return -ERESTARTSYS; } mutex_lock(&tty->termios_mutex); diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 33d37d230f8..a4aaca0e014 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c @@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port, int do_clocal = 0, retval; unsigned long flags; DEFINE_WAIT(wait); - int cd; /* block if port is in the process of being closed */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { @@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port, retval = -ERESTARTSYS; break; } - /* Probe the carrier. For devices with no carrier detect this - will always return true */ - cd = tty_port_carrier_raised(port); + /* + * Probe the carrier. For devices with no carrier detect + * tty_port_carrier_raised will always return true. 
+ * Never ask drivers if CLOCAL is set, this causes troubles + * on some hardware. + */ if (!(port->flags & ASYNC_CLOSING) && - (do_clocal || cd)) + (do_clocal || tty_port_carrier_raised(port))) break; if (signal_pending(current)) { retval = -ERESTARTSYS; diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c index 45d3e80156d..f3438083a28 100644 --- a/drivers/tty/vt/consolemap.c +++ b/drivers/tty/vt/consolemap.c @@ -516,6 +516,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) int err = 0, err1, i; struct uni_pagedir *p, *q; + /* Save original vc_unipagdir_loc in case we allocate a new one */ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; if (p->readonly) return -EIO; @@ -528,26 +529,57 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) err1 = con_clear_unimap(vc, NULL); if (err1) return err1; + /* + * Since refcount was > 1, con_clear_unimap() allocated a + * a new uni_pagedir for this vc. Re: p != q + */ q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; - for (i = 0, l = 0; i < 32; i++) + + /* + * uni_pgdir is a 32*32*64 table with rows allocated + * when its first entry is added. The unicode value must + * still be incremented for empty rows. We are copying + * entries from "p" (old) to "q" (new). + */ + l = 0; /* unicode value */ + for (i = 0; i < 32; i++) if ((p1 = p->uni_pgdir[i])) for (j = 0; j < 32; j++) - if ((p2 = p1[j])) + if ((p2 = p1[j])) { for (k = 0; k < 64; k++, l++) if (p2[k] != 0xffff) { + /* + * Found one, copy entry for unicode + * l with fontpos value p2[k]. + */ err1 = con_insert_unipair(q, l, p2[k]); if (err1) { p->refcount++; *vc->vc_uni_pagedir_loc = (unsigned long)p; con_release_unimap(q); kfree(q); - return err1; + return err1; } - } - p = q; - } else if (p == dflt) + } + } else { + /* Account for row of 64 empty entries */ + l += 64; + } + else + /* Account for empty table */ + l += 32 * 64; + + /* + * Finished copying font table, set vc_uni_pagedir to new table + */ + p = q; + } else if (p == dflt) { dflt = NULL; - + } + + /* + * Insert user specified unicode pairs into new table. + */ while (ct--) { unsigned short unicode, fontpos; __get_user(unicode, &list->unicode); @@ -557,11 +589,14 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) list++; } + /* + * Merge with fontmaps of any other virtual consoles. 
+ */ if (con_unify_unimap(vc, p)) return err; for (i = 0; i <= 3; i++) - set_inverse_transl(vc, p, i); /* Update all inverse translations */ + set_inverse_transl(vc, p, i); /* Update inverse translations */ set_inverse_trans_unicode(vc, p); return err; diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 66825c9f516..ab2320112bf 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -92,7 +92,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll) static struct vcs_poll_data * vcs_poll_data_get(struct file *file) { - struct vcs_poll_data *poll = file->private_data; + struct vcs_poll_data *poll = file->private_data, *kill = NULL; if (poll) return poll; @@ -121,10 +121,12 @@ vcs_poll_data_get(struct file *file) file->private_data = poll; } else { /* someone else raced ahead of us */ - vcs_poll_data_free(poll); + kill = poll; poll = file->private_data; } spin_unlock(&file->f_lock); + if (kill) + vcs_poll_data_free(kill); return poll; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index b3915b7ad3e..e41288a0003 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -3016,7 +3016,7 @@ int __init vty_init(const struct file_operations *console_fops) static struct class *vtconsole_class; -static int bind_con_driver(const struct consw *csw, int first, int last, +static int do_bind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; @@ -3027,7 +3027,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, if (!try_module_get(owner)) return -ENODEV; - console_lock(); + WARN_CONSOLE_UNLOCKED(); /* check if driver is registered */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3112,11 +3112,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last, retval = 0; err: - console_unlock(); module_put(owner); return retval; }; + +static int bind_con_driver(const struct consw *csw, int first, int last, + int deflt) +{ + int ret; + + console_lock(); + ret = do_bind_con_driver(csw, first, last, deflt); + console_unlock(); + return ret; +} + #ifdef CONFIG_VT_HW_CONSOLE_BINDING static int con_is_graphics(const struct consw *csw, int first, int last) { @@ -3152,6 +3163,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last) * or 0 on success. 
*/ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) +{ + int retval; + + console_lock(); + retval = do_unbind_con_driver(csw, first, last, deflt); + console_unlock(); + return retval; +} +EXPORT_SYMBOL(unbind_con_driver); + +/* unlocked version of unbind_con_driver() */ +int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; const struct consw *defcsw = NULL; @@ -3161,7 +3184,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) if (!try_module_get(owner)) return -ENODEV; - console_lock(); + WARN_CONSOLE_UNLOCKED(); /* check if driver is registered and if it is unbindable */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { @@ -3174,10 +3197,8 @@ } } - if (retval) { - console_unlock(); + if (retval) goto err; - } retval = -ENODEV; @@ -3193,15 +3214,11 @@ } } - if (retval) { - console_unlock(); + if (retval) goto err; - } - if (!con_is_bound(csw)) { - console_unlock(); + if (!con_is_bound(csw)) goto err; - } first = max(first, con_driver->first); last = min(last, con_driver->last); @@ -3228,15 +3245,14 @@ if (!con_is_bound(csw)) con_driver->flag &= ~CON_DRIVER_FLAG_INIT; - console_unlock(); /* ignore return value, binding should not fail */ - bind_con_driver(defcsw, first, last, deflt); + do_bind_con_driver(defcsw, first, last, deflt); err: module_put(owner); return retval; } -EXPORT_SYMBOL(unbind_con_driver); +EXPORT_SYMBOL_GPL(do_unbind_con_driver); static int vt_bind(struct con_driver *con) { @@ -3508,28 +3524,18 @@ int con_debug_leave(void) } EXPORT_SYMBOL_GPL(con_debug_leave); -/** - * register_con_driver - register console driver to console layer - * @csw: console driver - * @first: the first console to take over, minimum value is 0 - * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 - * - * DESCRIPTION: This function registers a console driver which can later - * bind to a range of consoles specified by @first and @last. It will - * also initialize the console driver by calling con_startup(). - */ -int register_con_driver(const struct consw *csw, int first, int last) +static int do_register_con_driver(const struct consw *csw, int first, int last) { struct module *owner = csw->owner; struct con_driver *con_driver; const char *desc; int i, retval = 0; + WARN_CONSOLE_UNLOCKED(); + if (!try_module_get(owner)) return -ENODEV; - console_lock(); - for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = &registered_con_driver[i]; @@ -3582,10 +3588,29 @@ } err: - console_unlock(); module_put(owner); return retval; } + +/** + * register_con_driver - register console driver to console layer + * @csw: console driver + * @first: the first console to take over, minimum value is 0 + * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 + * + * DESCRIPTION: This function registers a console driver which can later + * bind to a range of consoles specified by @first and @last. It will + * also initialize the console driver by calling con_startup(). 
+ */ +int register_con_driver(const struct consw *csw, int first, int last) +{ + int retval; + + console_lock(); + retval = do_register_con_driver(csw, first, last); + console_unlock(); + return retval; +} EXPORT_SYMBOL(register_con_driver); /** @@ -3601,9 +3626,18 @@ EXPORT_SYMBOL(register_con_driver); */ int unregister_con_driver(const struct consw *csw) { - int i, retval = -ENODEV; + int retval; console_lock(); + retval = do_unregister_con_driver(csw); + console_unlock(); + return retval; +} +EXPORT_SYMBOL(unregister_con_driver); + +int do_unregister_con_driver(const struct consw *csw) +{ + int i, retval = -ENODEV; /* cannot unregister a bound driver */ if (con_is_bound(csw)) @@ -3629,27 +3663,53 @@ int unregister_con_driver(const struct consw *csw) } } err: - console_unlock(); return retval; } -EXPORT_SYMBOL(unregister_con_driver); +EXPORT_SYMBOL_GPL(do_unregister_con_driver); /* * If we support more console drivers, this function is used * when a driver wants to take over some existing consoles * and become default driver for newly opened ones. * - * take_over_console is basically a register followed by unbind + * take_over_console is basically a register followed by unbind + */ +int do_take_over_console(const struct consw *csw, int first, int last, int deflt) +{ + int err; + + err = do_register_con_driver(csw, first, last); + /* + * If we get a busy error we still want to bind the console driver + * and return success, as we may have unbound the console driver + * but not unregistered it. + */ + if (err == -EBUSY) + err = 0; + if (!err) + do_bind_con_driver(csw, first, last, deflt); + + return err; +} +EXPORT_SYMBOL_GPL(do_take_over_console); + +/* + * If we support more console drivers, this function is used + * when a driver wants to take over some existing consoles + * and become default driver for newly opened ones. + * + * take_over_console is basically a register followed by unbind */ int take_over_console(const struct consw *csw, int first, int last, int deflt) { int err; err = register_con_driver(csw, first, last); - /* if we get an busy error we still want to bind the console driver + /* + * If we get a busy error we still want to bind the console driver * and return success, as we may have unbound the console driver -  * but not unregistered it. - */ 
+ */ if (err == -EBUSY) err = 0; if (!err) diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 5e096f43bce..65447c5f91d 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop, if (!perm && op->op != KD_FONT_OP_GET) return -EPERM; op->data = compat_ptr(((struct compat_console_font_op *)op)->data); - op->flags |= KD_FONT_FLAG_OLD; i = con_font_op(vc, op); if (i) return i; diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index a845f8b8382..9497171a7a5 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ { int ret, len; __le32 *buf; - int offb, offd; + int offb; + unsigned int offd; const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; int buflen = ((size - 1) / stride + 1 + size * 2) * 4; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 8faa23cd74f..84e69ea2309 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -498,6 +498,14 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) usb_autopm_put_interface(acm->control); + /* + * Unthrottle device in case the TTY was closed while throttled. + */ + spin_lock_irq(&acm->read_lock); + acm->throttled = 0; + acm->throttle_req = 0; + spin_unlock_irq(&acm->read_lock); + if (acm_submit_read_urbs(acm, GFP_KERNEL)) goto bail_out; @@ -554,10 +562,18 @@ static void acm_port_down(struct acm *acm) static void acm_tty_hangup(struct tty_struct *tty) { - struct acm *acm = tty->driver_data; - tty_port_hangup(&acm->port); + struct acm *acm; + mutex_lock(&open_mutex); + acm = tty->driver_data; + + if (!acm) + goto out; + + tty_port_hangup(&acm->port); acm_port_down(acm); + +out: mutex_unlock(&open_mutex); } @@ -744,10 +760,6 @@ static const __u32 acm_tty_speed[] = { 2500000, 3000000, 3500000, 4000000 }; -static const __u8 acm_tty_size[] = { - 5, 6, 7, 8 -}; - static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old) { @@ -764,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty, newline.bParityType = termios->c_cflag & PARENB ? (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 
2 : 0) : 0; - newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4]; + switch (termios->c_cflag & CSIZE) { + case CS5: + newline.bDataBits = 5; + break; + case CS6: + newline.bDataBits = 6; + break; + case CS7: + newline.bDataBits = 7; + break; + case CS8: + default: + newline.bDataBits = 8; + break; + } /* FIXME: Needs to clear unsupported bits in the termios */ acm->clocal = ((termios->c_cflag & CLOCAL) != 0); @@ -1027,7 +1053,8 @@ static int acm_probe(struct usb_interface *intf, } - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) + if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || + control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; epctrl = &control_interface->cur_altsetting->endpoint[0].desc; @@ -1155,7 +1182,7 @@ static int acm_probe(struct usb_interface *intf, if (usb_endpoint_xfer_int(epwrite)) usb_fill_int_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + usb_sndintpipe(usb_dev, epwrite->bEndpointAddress), NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); else usb_fill_bulk_urb(snd->urb, usb_dev, @@ -1183,6 +1210,8 @@ static int acm_probe(struct usb_interface *intf, i = device_create_file(&intf->dev, &dev_attr_wCountryCodes); if (i < 0) { kfree(acm->country_codes); + acm->country_codes = NULL; + acm->country_code_size = 0; goto skip_countries; } @@ -1191,6 +1220,8 @@ static int acm_probe(struct usb_interface *intf, if (i < 0) { device_remove_file(&intf->dev, &dev_attr_wCountryCodes); kfree(acm->country_codes); + acm->country_codes = NULL; + acm->country_code_size = 0; goto skip_countries; } } @@ -1475,6 +1506,12 @@ static const struct usb_device_id acm_ids[] = { Maybe we should define a new quirk for this. */ }, + { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */ + .driver_info = NO_UNION_NORMAL, + }, + { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */ + .driver_info = NO_UNION_NORMAL, + }, { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 2b9ff518b50..c84b4553208 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -54,9 +54,12 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_POLL_RUNNING 6 #define WDM_RESPONDING 7 #define WDM_SUSPENDING 8 +#define WDM_OVERFLOW 10 #define WDM_MAX 16 +/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ +#define WDM_DEFAULT_BUFSIZE 256 static DEFINE_MUTEX(wdm_mutex); @@ -88,7 +91,8 @@ struct wdm_device { int count; dma_addr_t shandle; dma_addr_t ihandle; - struct mutex lock; + struct mutex wlock; + struct mutex rlock; wait_queue_head_t wait; struct work_struct rxwork; int werr; @@ -105,8 +109,9 @@ static void wdm_out_callback(struct urb *urb) spin_lock(&desc->iuspin); desc->werr = urb->status; spin_unlock(&desc->iuspin); - clear_bit(WDM_IN_USE, &desc->flags); kfree(desc->outbuf); + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); wake_up(&desc->wait); } @@ -114,6 +119,7 @@ static void wdm_in_callback(struct urb *urb) { struct wdm_device *desc = urb->context; int status = urb->status; + int length = urb->actual_length; spin_lock(&desc->iuspin); clear_bit(WDM_RESPONDING, &desc->flags); @@ -144,9 +150,17 @@ static void wdm_in_callback(struct urb *urb) } desc->rerr = status; - desc->reslength = urb->actual_length; - memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); - desc->length += 
desc->reslength; + if (length + desc->length > desc->wMaxCommand) { + /* The buffer would overflow */ + set_bit(WDM_OVERFLOW, &desc->flags); + } else { + /* we may already be in overflow */ + if (!test_bit(WDM_OVERFLOW, &desc->flags)) { + memmove(desc->ubuf + desc->length, desc->inbuf, length); + desc->length += length; + desc->reslength = length; + } + } skip_error: wake_up(&desc->wait); @@ -309,7 +323,7 @@ static ssize_t wdm_write if (we < 0) return -EIO; - desc->outbuf = buf = kmalloc(count, GFP_KERNEL); + buf = kmalloc(count, GFP_KERNEL); if (!buf) { rv = -ENOMEM; goto outnl; @@ -323,7 +337,7 @@ static ssize_t wdm_write } /* concurrent writes and disconnect */ - r = mutex_lock_interruptible(&desc->lock); + r = mutex_lock_interruptible(&desc->wlock); rv = -ERESTARTSYS; if (r) { kfree(buf); @@ -373,10 +387,12 @@ static ssize_t wdm_write req->wIndex = desc->inum; req->wLength = cpu_to_le16(count); set_bit(WDM_IN_USE, &desc->flags); + desc->outbuf = buf; rv = usb_submit_urb(desc->command, GFP_KERNEL); if (rv < 0) { kfree(buf); + desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); } else { @@ -386,7 +402,7 @@ static ssize_t wdm_write out: usb_autopm_put_interface(desc->intf); outnp: - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); outnl: return rv < 0 ? rv : count; } @@ -394,22 +410,28 @@ static ssize_t wdm_write static ssize_t wdm_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - int rv, cntr = 0; + int rv, cntr; int i = 0; struct wdm_device *desc = file->private_data; - rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */ + rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ if (rv < 0) return -ERESTARTSYS; - if (desc->length == 0) { + cntr = ACCESS_ONCE(desc->length); + if (cntr == 0) { desc->read = 0; retry: if (test_bit(WDM_DISCONNECTING, &desc->flags)) { rv = -ENODEV; goto err; } + if (test_bit(WDM_OVERFLOW, &desc->flags)) { + clear_bit(WDM_OVERFLOW, &desc->flags); + rv = -ENOBUFS; + goto err; + } i++; if (file->f_flags & O_NONBLOCK) { if (!test_bit(WDM_READ, &desc->flags)) { @@ -449,21 +471,27 @@ static ssize_t wdm_read spin_unlock_irq(&desc->iuspin); goto retry; } + if (!desc->reslength) { /* zero length read */ + dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); + clear_bit(WDM_READ, &desc->flags); spin_unlock_irq(&desc->iuspin); goto retry; } - clear_bit(WDM_READ, &desc->flags); + cntr = desc->length; spin_unlock_irq(&desc->iuspin); } - cntr = count > desc->length ? 
desc->length : count; + if (cntr > count) + cntr = count; rv = copy_to_user(buffer, desc->ubuf, cntr); if (rv > 0) { rv = -EFAULT; goto err; } + spin_lock_irq(&desc->iuspin); + for (i = 0; i < desc->length - cntr; i++) desc->ubuf[i] = desc->ubuf[i + cntr]; @@ -471,10 +499,13 @@ static ssize_t wdm_read /* in case we had outstanding data */ if (!desc->length) clear_bit(WDM_READ, &desc->flags); + + spin_unlock_irq(&desc->iuspin); + rv = cntr; err: - mutex_unlock(&desc->lock); + mutex_unlock(&desc->rlock); return rv; } @@ -498,7 +529,7 @@ static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait) spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { - mask = POLLERR; + mask = POLLHUP | POLLERR; spin_unlock_irqrestore(&desc->iuspin, flags); goto desc_out; } @@ -540,7 +571,8 @@ static int wdm_open(struct inode *inode, struct file *file) } intf->needs_remote_wakeup = 1; - mutex_lock(&desc->lock); + /* using write lock to protect desc->count */ + mutex_lock(&desc->wlock); if (!desc->count++) { desc->werr = 0; desc->rerr = 0; @@ -553,7 +585,7 @@ static int wdm_open(struct inode *inode, struct file *file) } else { rv = 0; } - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); usb_autopm_put_interface(desc->intf); out: mutex_unlock(&wdm_mutex); @@ -565,9 +597,11 @@ static int wdm_release(struct inode *inode, struct file *file) struct wdm_device *desc = file->private_data; mutex_lock(&wdm_mutex); - mutex_lock(&desc->lock); + + /* using write lock to protect desc->count */ + mutex_lock(&desc->wlock); desc->count--; - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); if (!desc->count) { dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); @@ -630,7 +664,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) struct usb_cdc_dmm_desc *dmhd; u8 *buffer = intf->altsetting->extra; int buflen = intf->altsetting->extralen; - u16 maxcom = 0; + u16 maxcom = WDM_DEFAULT_BUFSIZE; if (!buffer) goto out; @@ -665,7 +699,8 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); if (!desc) goto out; - mutex_init(&desc->lock); + mutex_init(&desc->rlock); + mutex_init(&desc->wlock); spin_lock_init(&desc->iuspin); init_waitqueue_head(&desc->wait); desc->wMaxCommand = maxcom; @@ -716,7 +751,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id) goto err; desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf), - desc->bMaxPacketSize0, + desc->wMaxCommand, GFP_KERNEL, &desc->response->transfer_dma); if (!desc->inbuf) @@ -779,11 +814,13 @@ static void wdm_disconnect(struct usb_interface *intf) /* to terminate pending flushes */ clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); - mutex_lock(&desc->lock); + wake_up_all(&desc->wait); + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); kill_urbs(desc); cancel_work_sync(&desc->rxwork); - mutex_unlock(&desc->lock); - wake_up_all(&desc->wait); + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); if (!desc->count) cleanup(desc); mutex_unlock(&wdm_mutex); @@ -798,8 +835,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); /* if this is an autosuspend the caller does the locking */ - if (!(message.event & PM_EVENT_AUTO)) - mutex_lock(&desc->lock); + if (!(message.event & PM_EVENT_AUTO)) { + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); + } 
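/*
 * Illustrative sketch only, not part of the cdc-wdm patch above; the
 * "demo_*" names below are hypothetical.  It restates the locking scheme
 * the hunks above introduce: the old single desc->lock is split into
 * rlock (serializing the read side) and wlock (serializing writes, open
 * and release), and every path that must quiesce both directions, such as
 * disconnect, system suspend and pre_reset, takes rlock before wlock so
 * the lock ordering never inverts.
 */
#include <linux/mutex.h>

struct demo_wdm_state {
	struct mutex rlock;	/* read-side paths */
	struct mutex wlock;	/* write-side paths and the open/release count */
};

static void demo_wdm_quiesce(struct demo_wdm_state *s)
{
	mutex_lock(&s->rlock);	/* same order as wdm_disconnect() above */
	mutex_lock(&s->wlock);
	/* kill URBs, cancel pending work, etc. */
	mutex_unlock(&s->wlock);
	mutex_unlock(&s->rlock);
}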
spin_lock_irq(&desc->iuspin); if ((message.event & PM_EVENT_AUTO) && @@ -815,8 +854,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) kill_urbs(desc); cancel_work_sync(&desc->rxwork); } - if (!(message.event & PM_EVENT_AUTO)) - mutex_unlock(&desc->lock); + if (!(message.event & PM_EVENT_AUTO)) { + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); + } return rv; } @@ -854,7 +895,8 @@ static int wdm_pre_reset(struct usb_interface *intf) { struct wdm_device *desc = usb_get_intfdata(intf); - mutex_lock(&desc->lock); + mutex_lock(&desc->rlock); + mutex_lock(&desc->wlock); kill_urbs(desc); /* @@ -875,8 +917,10 @@ static int wdm_post_reset(struct usb_interface *intf) struct wdm_device *desc = usb_get_intfdata(intf); int rv; + clear_bit(WDM_OVERFLOW, &desc->flags); rv = recover_from_urb_loss(desc); - mutex_unlock(&desc->lock); + mutex_unlock(&desc->wlock); + mutex_unlock(&desc->rlock); return 0; } diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 0149c0976e9..ca98341a21b 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, /* print devices for all busses */ list_for_each_entry(bus, &usb_bus_list, bus_list) { /* recurse through all children of the root hub */ - if (!bus->root_hub) + if (!bus_to_hcd(bus)->rh_registered) continue; usb_lock_device(bus->root_hub); ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 37518dfdeb9..68d63f84285 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -292,17 +292,14 @@ static struct async *async_getcompleted(struct dev_state *ps) static struct async *async_getpending(struct dev_state *ps, void __user *userurb) { - unsigned long flags; struct async *as; - spin_lock_irqsave(&ps->lock, flags); list_for_each_entry(as, &ps->async_pending, asynclist) if (as->userurb == userurb) { list_del_init(&as->asynclist); - spin_unlock_irqrestore(&ps->lock, flags); return as; } - spin_unlock_irqrestore(&ps->lock, flags); + return NULL; } @@ -357,6 +354,7 @@ static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr) __releases(ps->lock) __acquires(ps->lock) { + struct urb *urb; struct async *as; /* Mark all the pending URBs that match bulk_addr, up to but not @@ -379,8 +377,11 @@ __acquires(ps->lock) list_for_each_entry(as, &ps->async_pending, asynclist) { if (as->bulk_status == AS_UNLINK) { as->bulk_status = 0; /* Only once */ + urb = as->urb; + usb_get_urb(urb); spin_unlock(&ps->lock); /* Allow completions */ - usb_unlink_urb(as->urb); + usb_unlink_urb(urb); + usb_put_urb(urb); spin_lock(&ps->lock); goto rescan; } @@ -431,6 +432,7 @@ static void async_completed(struct urb *urb) static void destroy_async(struct dev_state *ps, struct list_head *list) { + struct urb *urb; struct async *as; unsigned long flags; @@ -438,10 +440,13 @@ static void destroy_async(struct dev_state *ps, struct list_head *list) while (!list_empty(list)) { as = list_entry(list->next, struct async, asynclist); list_del_init(&as->asynclist); + urb = as->urb; + usb_get_urb(urb); /* drop the spinlock so the completion handler can run */ spin_unlock_irqrestore(&ps->lock, flags); - usb_kill_urb(as->urb); + usb_kill_urb(urb); + usb_put_urb(urb); spin_lock_irqsave(&ps->lock, flags); } spin_unlock_irqrestore(&ps->lock, flags); @@ -621,6 +626,8 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, index &= 0xff; switch 
(requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: + if ((index & ~USB_DIR_IN) == 0) + return 0; ret = findintfep(ps->dev, index); if (ret >= 0) ret = checkintf(ps, ret); @@ -1335,12 +1342,24 @@ static int proc_submiturb(struct dev_state *ps, void __user *arg) static int proc_unlinkurb(struct dev_state *ps, void __user *arg) { + struct urb *urb; struct async *as; + unsigned long flags; + spin_lock_irqsave(&ps->lock, flags); as = async_getpending(ps, arg); - if (!as) + if (!as) { + spin_unlock_irqrestore(&ps->lock, flags); return -EINVAL; - usb_kill_urb(as->urb); + } + + urb = as->urb; + usb_get_urb(urb); + spin_unlock_irqrestore(&ps->lock, flags); + + usb_kill_urb(urb); + usb_put_urb(urb); + return 0; } @@ -1523,10 +1542,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; - if (as->userbuffer && urb->actual_length) - if (copy_to_user(as->userbuffer, urb->transfer_buffer, - urb->actual_length)) + if (as->userbuffer && urb->actual_length) { + if (urb->number_of_packets > 0) /* Isochronous */ + i = urb->transfer_buffer_length; + else /* Non-Isoc */ + i = urb->actual_length; + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i)) return -EFAULT; + } if (put_user(as->status, &userurb->status)) return -EFAULT; if (put_user(urb->actual_length, &userurb->actual_length)) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index ce22f4a84ed..6c1642b382f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; dev->current_state = PCI_D0; - if (!dev->irq) { + /* The xHCI driver supports MSI and MSI-X, + * so don't fail if the BIOS doesn't provide a legacy IRQ. + */ + if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { dev_err(&dev->dev, "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", pci_name(dev)); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 54338fcc464..493da9275c5 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -985,10 +985,7 @@ static int register_root_hub(struct usb_hcd *hcd) if (retval) { dev_err (parent_dev, "can't register root hub for %s, %d\n", dev_name(&usb_dev->dev), retval); - } - mutex_unlock(&usb_bus_list_lock); - - if (retval == 0) { + } else { spin_lock_irq (&hcd_root_hub_lock); hcd->rh_registered = 1; spin_unlock_irq (&hcd_root_hub_lock); @@ -997,6 +994,7 @@ static int register_root_hub(struct usb_hcd *hcd) if (HCD_DEAD(hcd)) usb_hc_died (hcd); /* This time clean up */ } + mutex_unlock(&usb_bus_list_lock); return retval; } @@ -1395,11 +1393,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_SG; - if (n != urb->num_sgs) { - urb->num_sgs = n; + urb->num_mapped_sgs = n; + if (n != urb->num_sgs) urb->transfer_flags |= URB_DMA_SG_COMBINED; - } } else if (urb->sg) { struct scatterlist *sg = urb->sg; urb->transfer_dma = dma_map_page( @@ -2442,8 +2439,10 @@ int usb_add_hcd(struct usb_hcd *hcd, && device_can_wakeup(&hcd->self.root_hub->dev)) dev_dbg(hcd->self.controller, "supports USB remote wakeup\n"); - /* enable irqs just before we start the controller */ - if (usb_hcd_is_primary_hcd(hcd)) { + /* enable irqs just before we start the controller, + * if the BIOS provides legacy PCI irqs. 
+ */ + if (usb_hcd_is_primary_hcd(hcd) && irqnum) { retval = usb_hcd_request_irqs(hcd, irqnum, irqflags); if (retval) goto err_request_irq; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 4d61569b376..e0ca6276712 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -510,13 +511,16 @@ static void hub_tt_work(struct work_struct *work) int limit = 100; spin_lock_irqsave (&hub->tt.lock, flags); - while (--limit && !list_empty (&hub->tt.clear_list)) { + while (!list_empty(&hub->tt.clear_list)) { struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; const struct hc_driver *drv; int status; + if (!hub->quiescing && --limit < 0) + break; + next = hub->tt.clear_list.next; clear = list_entry (next, struct usb_tt_clear, clear_list); list_del (&clear->clear_list); @@ -734,10 +738,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) if (type == HUB_INIT3) goto init3; - /* After a resume, port power should still be on. + /* The superspeed hub except for root hub has to use Hub Depth + * value as an offset into the route string to locate the bits + * it uses to determine the downstream port number. So hub driver + * should send a set hub depth request to superspeed hub after + * the superspeed hub is set configuration in initialization or + * reset procedure. + * + * After a resume, port power should still be on. * For any other type of activation, turn it on. */ if (type != HUB_RESUME) { + if (hdev->parent && hub_is_superspeed(hdev)) { + ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), + HUB_SET_DEPTH, USB_RT_HUB, + hdev->level - 1, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT); + if (ret < 0) + dev_err(hub->intfdev, + "set hub depth failed\n"); + } /* Speed up system boot by using a delayed_work for the * hub's initial power-up delays. This is pretty awkward @@ -973,7 +993,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) - cancel_work_sync(&hub->tt.clear_work); + flush_work_sync(&hub->tt.clear_work); } /* caller has locked the hub device */ @@ -1025,18 +1045,6 @@ static int hub_configure(struct usb_hub *hub, goto fail; } - if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) { - ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), - HUB_SET_DEPTH, USB_RT_HUB, - hdev->level - 1, 0, NULL, 0, - USB_CTRL_SET_TIMEOUT); - - if (ret < 0) { - message = "can't set hub depth"; - goto fail; - } - } - /* Request the entire hub descriptor. * hub->descriptor can handle USB_MAXCHILDREN ports, * but the hub can/will return fewer bytes here. @@ -1679,15 +1687,12 @@ void usb_disconnect(struct usb_device **pdev) { struct usb_device *udev = *pdev; int i; - struct usb_hcd *hcd; if (!udev) { pr_debug ("%s nodev\n", __func__); return; } - hcd = bus_to_hcd(udev->bus); - /* mark the device as inactive, so any further urb submissions for * this device (and any of its children) will fail immediately. * this quiesces everything except pending urbs. @@ -1716,9 +1721,7 @@ void usb_disconnect(struct usb_device **pdev) * so that the hardware is now fully quiesced. 
*/ dev_dbg (&udev->dev, "unregistering device\n"); - mutex_lock(hcd->bandwidth_mutex); usb_disable_device(udev, 0); - mutex_unlock(hcd->bandwidth_mutex); usb_hcd_synchronize_unlinks(udev); usb_remove_ep_devs(&udev->ep0); @@ -1975,6 +1978,14 @@ int usb_new_device(struct usb_device *udev) /* Tell the world! */ announce_device(udev); + if (udev->serial) + add_device_randomness(udev->serial, strlen(udev->serial)); + if (udev->product) + add_device_randomness(udev->product, strlen(udev->product)); + if (udev->manufacturer) + add_device_randomness(udev->manufacturer, + strlen(udev->manufacturer)); + device_enable_async_suspend(&udev->dev); /* Register the device. The device driver is responsible * for configuring the device and invoking the add-device @@ -2111,7 +2122,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define HUB_ROOT_RESET_TIME 50 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 #define HUB_LONG_RESET_TIME 200 -#define HUB_RESET_TIMEOUT 500 +#define HUB_RESET_TIMEOUT 800 static int hub_port_wait_reset(struct usb_hub *hub, int port1, struct usb_device *udev, unsigned int delay) @@ -2490,7 +2501,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) static int finish_port_resume(struct usb_device *udev) { int status = 0; - u16 devstatus; + u16 devstatus = 0; /* caller owns the udev device lock */ dev_dbg(&udev->dev, "%s\n", @@ -2535,7 +2546,13 @@ static int finish_port_resume(struct usb_device *udev) if (status) { dev_dbg(&udev->dev, "gone after usb resume? status %d\n", status); - } else if (udev->actconfig) { + /* + * There are a few quirky devices which violate the standard + * by claiming to have remote wakeup enabled after a reset, + * which crash if the feature is cleared, hence check for + * udev->reset_resume + */ + } else if (udev->actconfig && !udev->reset_resume) { le16_to_cpus(&devstatus); if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { status = usb_control_msg(udev, diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 415593cacf2..6c1ce74a026 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -308,7 +308,8 @@ static void sg_complete(struct urb *urb) retval = usb_unlink_urb(io->urbs [i]); if (retval != -EINPROGRESS && retval != -ENODEV && - retval != -EBUSY) + retval != -EBUSY && + retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); @@ -317,7 +318,6 @@ static void sg_complete(struct urb *urb) } spin_lock(&io->lock); } - urb->dev = NULL; /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; @@ -524,7 +524,6 @@ void usb_sg_wait(struct usb_sg_request *io) case -ENXIO: /* hc didn't queue this one */ case -EAGAIN: case -ENOMEM: - io->urbs[i]->dev = NULL; retval = 0; yield(); break; @@ -542,7 +541,6 @@ void usb_sg_wait(struct usb_sg_request *io) /* fail any uncompleted urbs */ default: - io->urbs[i]->dev = NULL; io->urbs[i]->status = retval; dev_dbg(&io->dev->dev, "%s, submit --> %d\n", __func__, retval); @@ -593,7 +591,10 @@ void usb_sg_cancel(struct usb_sg_request *io) if (!io->urbs [i]->dev) continue; retval = usb_unlink_urb(io->urbs [i]); - if (retval != -EINPROGRESS && retval != -EBUSY) + if (retval != -EINPROGRESS + && retval != -ENODEV + && retval != -EBUSY + && retval != -EIDRM) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } @@ -1135,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and 
usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. - * - * Must be called with hcd->bandwidth_mutex held. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { @@ -1189,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); /* Second pass: remove endpoint pointers */ } for (i = skip_ep0; i < 16; ++i) { @@ -1749,7 +1750,6 @@ int usb_set_configuration(struct usb_device *dev, int configuration) /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ - mutex_lock(hcd->bandwidth_mutex); if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ @@ -1762,6 +1762,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ + mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { mutex_unlock(hcd->bandwidth_mutex); @@ -1772,27 +1773,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) dev->actconfig = cp; if (cp) usb_notify_config_device(dev); - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_CONFIGURATION, 0, configuration, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { - /* All the old state is gone, so what else can we do? - * The device is probably useless now anyway. - */ - dev->actconfig = cp = NULL; - } - - if (!cp) { - usb_notify_config_device(dev); - usb_set_device_state(dev, USB_STATE_ADDRESS); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - usb_autosuspend_device(dev); - goto free_interfaces; - } - mutex_unlock(hcd->bandwidth_mutex); - usb_set_device_state(dev, USB_STATE_CONFIGURED); - + /* Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ @@ -1805,7 +1786,6 @@ int usb_set_configuration(struct usb_device *dev, int configuration) intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; - intf->intf_assoc = find_iad(dev, cp, i); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); @@ -1818,6 +1798,8 @@ int usb_set_configuration(struct usb_device *dev, int configuration) if (!alt) alt = &intf->altsetting[0]; + intf->intf_assoc = + find_iad(dev, cp, alt->desc.bInterfaceNumber); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; @@ -1836,6 +1818,35 @@ int usb_set_configuration(struct usb_device *dev, int configuration) } kfree(new_interfaces); + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + USB_REQ_SET_CONFIGURATION, 0, configuration, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); + if (ret < 0 && cp) { + /* + * All the old state is gone, so what else can we do? + * The device is probably useless now anyway. 
+ */ + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + for (i = 0; i < nintf; ++i) { + usb_disable_interface(dev, cp->interface[i], true); + put_device(&cp->interface[i]->dev); + cp->interface[i] = NULL; + } + cp = NULL; + } + + dev->actconfig = cp; + mutex_unlock(hcd->bandwidth_mutex); + + if (!cp) { + usb_set_device_state(dev, USB_STATE_ADDRESS); + + /* Leave LPM disabled while the device is unconfigured. */ + usb_autosuspend_device(dev); + return ret; + } + usb_set_device_state(dev, USB_STATE_CONFIGURED); + if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 81ce6a8e1d9..b6cc1fa2a81 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Microchip Joss Optical infrared touchboard device */ + { USB_DEVICE(0x04d8, 0x000c), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -58,6 +62,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Alcor Micro Corp. Hub */ + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, + /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -69,6 +76,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Midiman M-Audio Keystation 88es */ + { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 1fc8f124980..347bb058e1e 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void) writel(FLAG_CF, &ehci_regs->configured_flag); /* Wait until the controller is no longer halted */ - loop = 10; + loop = 1000; do { status = readl(&ehci_regs->status); if (!(status & STS_HALT)) diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index da0819171b8..6f6eb1fdb23 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -544,7 +544,7 @@ config USB_LANGWELL select USB_GADGET_SELECTED config USB_GADGET_EG20T - boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC" + tristate "Intel EG20T PCH/OKI SEMICONDUCTOR IOH(ML7213/ML7831) UDC" depends on PCI select USB_GADGET_DUALSPEED help @@ -562,8 +562,9 @@ config USB_GADGET_EG20T This driver also can be used for OKI SEMICONDUCTOR's ML7213 which is for IVI(In-Vehicle Infotainment) use. - ML7213 is companion chip for Intel Atom E6xx series. - ML7213 is completely compatible for Intel EG20T PCH. + ML7831 is for general purpose use. + ML7213/ML7831 is companion chip for Intel Atom E6xx series. + ML7213/ML7831 is completely compatible for Intel EG20T PCH. config USB_EG20T tristate @@ -626,11 +627,11 @@ config USB_GADGET_DUMMY_HCD side is the master; the gadget side is the slave. Gadget drivers can be high, full, or low speed; and they have access to endpoints like those from NET2280, PXA2xx, or SA1100 hardware. 
- + This may help in some stages of creating a driver to embed in a Linux device, since it lets you debug several parts of the gadget driver without its hardware or drivers being involved. - + Since such a gadget side driver needs to interoperate with a host side Linux-USB device driver, this may help to debug both sides of a USB protocol stack. @@ -736,7 +737,7 @@ config USB_ETH help This driver implements Ethernet style communication, in one of several ways: - + - The "Communication Device Class" (CDC) Ethernet Control Model. That protocol is often avoided with pure Ethernet adapters, in favor of simpler vendor-specific hardware, but is widely @@ -776,7 +777,7 @@ config USB_ETH_RNDIS If you say "y" here, the Ethernet gadget driver will try to provide a second device configuration, supporting RNDIS to talk to such Microsoft USB hosts. - + To make MS-Windows work with this, use Documentation/usb/linux.inf as the "driver info file". For versions of MS-Windows older than XP, you'll need to download drivers from Microsoft's website; a URL @@ -961,241 +962,6 @@ config USB_G_ANDROID The functions can be configured via a board file and may be enabled and disabled dynamically. -config USB_ANDROID_ACM - boolean "Android gadget ACM serial function" - depends on USB_G_ANDROID - help - Provides ACM serial function for android gadget driver. - -config PASCAL_DETECT - boolean "PASCAL DETECT" - depends on USB_ANDROID_ACM - default n - help - Provides ACM serial function for pascal mode. - -config LISMO - boolean "LISMO" - depends on PASCAL_DETECT - default n - help - Provides LISMO service. - -config USB_ANDROID_ADB - boolean "Android gadget adb function" - depends on USB_G_ANDROID - default y - help - Provides adb function for android gadget driver. - -config USB_ANDROID_DIAG - boolean "USB MSM7K Diag Function" - depends on USB_G_ANDROID - default y - help - Qualcomm diagnostics interface support. - -config USB_ANDROID_MDM9K_DIAG - boolean "USB MDM 9k Diag Function" - depends on USB_ANDROID_DIAG - help - Qualcomm diagnostics interface support for 9K. - -config USB_ANDROID_MDM9K_MODEM - boolean "USB MDM 9k Modem Function" - depends on USB_G_ANDROID - help - Qualcomm Modem interface support for 9K. - -config USB_ANDROID_MASS_STORAGE - boolean "Android gadget mass storage function" - depends on USB_G_ANDROID && SWITCH - default y - help - Provides USB mass storage function for android gadget driver. - -config USB_ANDROID_MTP - boolean "Android MTP function" - depends on USB_G_ANDROID - help - Provides Media Transfer Protocol (MTP) support for android gadget driver. - -config USB_ANDROID_RNDIS - boolean "Android gadget RNDIS ethernet function" - depends on USB_G_ANDROID - default n - help - Provides RNDIS ethernet function for android gadget driver. - -config USB_ANDROID_RMNET - boolean "RmNet function driver" - depends on USB_G_ANDROID - default n - help - Enabling this option adds rmnet support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. 
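A common thread in the Kconfig entries removed above and below: almost all of these per-function booleans (the rmnet variants, the serial and ACM transports, and so on) go away because the android gadget now selects its functions and transports at runtime, from comma-separated strings written to sysfs attributes such as "functions" and the rmnet/serial "transports" files added later in this patch. The parsing pattern those attributes use is the usual kernel strim()/strsep() loop; here is a minimal, self-contained sketch of it, where parse_function_list() and the enable_one() callback are invented names for illustration and not functions from this patch:

#include <linux/kernel.h>
#include <linux/string.h>

/* Sketch: split "acm,adb,mass_storage" into tokens and hand each one to a
 * caller-supplied callback (a stand-in for android_enable_function() and
 * friends).  Illustrative only. */
static int parse_function_list(const char *buff,
			       int (*enable_one)(const char *name))
{
	char buf[256];
	char *b, *name;
	int err;

	strlcpy(buf, buff, sizeof(buf));
	b = strim(buf);				/* trim surrounding whitespace */

	while (b) {
		name = strsep(&b, ",");		/* next comma-separated token */
		if (!name || !*name)
			continue;		/* skip empty entries like ",," */
		err = enable_one(name);
		if (err)
			return err;		/* stop at the first failure */
	}
	return 0;
}

Selecting functions this way is what lets a single g_android binary serve very different product configurations, which is why the compile-time switches above can be dropped.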
- -config RMNET_SMD_CTL_CHANNEL - string "control SMD channel name" - depends on USB_ANDROID_RMNET - default "" - help - Control SMD channel for transferring QMI messages - -config RMNET_SMD_DATA_CHANNEL - string "Data SMD channel name" - depends on USB_ANDROID_RMNET - default "" - help - Data SMD channel for transferring network data - -config USB_ANDROID_RMNET_SDIO - boolean "RmNet over SDIO function driver" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_DMUX - default n - help - Enabling this option adds rmnet over sdio support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. - -config RMNET_SDIO_CTL_CHANNEL - int "control SDIO channel id" - depends on USB_ANDROID_RMNET_SDIO - help - Control SDIO channel for transferring QMI messages - -config RMNET_SDIO_DATA_CHANNEL - int "Data SDIO channel id" - depends on USB_ANDROID_RMNET_SDIO - help - Data SDIO channel for transferring network data - -config USB_ANDROID_RMNET_SMD_SDIO - boolean "RmNet over SMD/SDIO function driver" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_DMUX - default n - help - Enabling this option adds rmnet over sdio support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and modem found in MSM chipsets. - -config RMNET_SMD_SDIO_CTL_CHANNEL - int "control SDIO channel id" - depends on USB_ANDROID_RMNET_SMD_SDIO - default 8 - help - Control SDIO channel for transferring QMI messages - -config RMNET_SMD_SDIO_DATA_CHANNEL - int "Data SDIO channel id" - default 8 - depends on USB_ANDROID_RMNET_SMD_SDIO - help - Data SDIO channel for transferring network data - -config RMNET_SDIO_SMD_DATA_CHANNEL - string "Data SMD channel name" - depends on USB_ANDROID_RMNET_SMD_SDIO - default "DATA40" - help - Data SMD channel for transferring network data - -config USB_ANDROID_RMNET_BAM - boolean "RmNet over BAM driver" - depends on USB_G_ANDROID && MSM_BAM_DMUX - help - Enabling this option adds rmnet over BAM support to the - android gadget. Rmnet is an alternative to CDC-ECM - and Windows RNDIS. It uses QUALCOMM MSM Interface - for control transfers. It acts like a bridge between - Host and Modem processor using bam-dmux interface. - This option enables only DATA interface. Control - interface has to be enabled separately - -config USB_ANDROID_RMNET_CTRL_SMD - boolean "RmNet control over SMD driver" - depends on USB_G_ANDROID && MSM_SMD - help - Enabling this option adds rmnet control over SMD - support to the android gadget. Rmnet is an - alternative to CDC-ECM and Windows RNDIS. - It uses QUALCOMM MSM Interface for control - transfers. This option enables only control interface. - Data interface has to be enabled separately - -config USB_F_SERIAL - boolean "generic serial function driver" - depends on USB_G_ANDROID - default n - help - Say "y" to link the driver statically, or "m" to build - as a part of "g_android" - -config MODEM_SUPPORT - boolean "modem support in generic serial function driver" - depends on USB_F_SERIAL - default y - help - This feature enables the modem functionality in the - generic serial. - adds interrupt endpoint support to send modem notifications - to host. - adds CDC descriptors to enumerate the generic serial as MODEM. - adds CDC class requests to configure MODEM line settings. 
- Say "y" to enable MODEM support in the generic serial driver. - -config USB_ANDROID_SERIAL - boolean "Android gadget Serial function" - depends on USB_G_ANDROID - help - -config USB_ANDROID_PROJECTOR - boolean "Android gadget Projector function" - depends on USB_G_ANDROID - help - -config USB_ANDROID_ECM - boolean "Android gadget ECM function" - depends on USB_G_ANDROID - help - -config USB_F_SERIAL_SDIO - boolean "generic serial function driver over SDIO" - depends on USB_G_ANDROID && MSM_SDIO_CMUX && MSM_SDIO_AL - default n - help - Gadget serial driver is one way to communicate with modem - device. This option enables serial driver to communicate - to modem device using sdio. It makes very minimal changes - to Gadget usb serial interface(f_serial.c) and adds - separate transport layer u_sdio(very similar to u_serial.c) - Say "y" to link the driver staticall - -config USB_F_SERIAL_SMD - boolean "generic serial function driver over SMD" - depends on USB_G_ANDROID && MSM_SMD - default n - help - Gadget serial driver is one way to communicate with modem - device. This option enables serial driver to communicate - to modem device using smd. It makes very minimal changes - to Gadget usb serial interface(f_serial.c) and adds - separate transport layer u_smd(very similar to u_serial.c) - Say "y" to link the driver statically - -config USB_ANDROID_USBNET - boolean "Android gadget USBNET function for VoIP" - depends on USB_G_ANDROID - default n - help - config USB_CDC_COMPOSITE tristate "CDC Composite Device (Ethernet and ACM)" depends on NET @@ -1317,10 +1083,6 @@ config USB_G_WEBCAM endchoice -config USB_ACCESSORY_DETECT_BY_ADC - boolean "DETECT USB ACCESSORY BY PMIC ADC" - default n - config USB_CSW_HACK boolean "USB Mass storage csw hack Feature" default y @@ -1328,6 +1090,12 @@ config USB_CSW_HACK This csw hack feature is for increasing the performance of the mass storage +config USB_MSC_PROFILING + bool "USB MSC performance profiling" + help + If you say Y here, support will be added for collecting + Mass-storage performance numbers at the VFS level. + config MODEM_SUPPORT boolean "modem support in generic serial function driver" depends on USB_G_ANDROID @@ -1401,9 +1169,11 @@ config USB_ANDROID_RMNET_CTRL_SMD transfers. This option enables only control interface. Data interface used is BAM. -config USB_GADGET_VERIZON_PRODUCT_ID - boolean "Verizon Product ID Mapping" +endif # USB_GADGET + +config USB_HTC_SWITCH_STUB depends on USB_G_ANDROID + bool "USB HTC function switch stub" default n - -endif # USB_GADGET + help + Provide dummy HTC USB function switch sysfs device for libhtc_ril. diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c index c9db83207d5..f24e74d8df4 100644 --- a/drivers/usb/gadget/android.c +++ b/drivers/usb/gadget/android.c @@ -2,6 +2,7 @@ * Gadget Driver for Android * * Copyright (C) 2008 Google, Inc. + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* Author: Mike Lockwood * * This software is licensed under the terms of the GNU General Public @@ -30,13 +31,9 @@ #include #include #include -#include +#include #include "gadget_chips.h" -#include -#ifdef CONFIG_PERFLOCK -#include -#endif /* * Kbuild is not very cooperative with respect to linking separately @@ -51,73 +48,28 @@ #include "composite.c" #include "f_diag.c" -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) #include "f_rmnet_smd.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) #include "f_rmnet_sdio.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) #include "f_rmnet_smd_sdio.c" -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) #include "f_rmnet.c" -#endif #include "f_mass_storage.c" #include "u_serial.c" #include "u_sdio.c" #include "u_smd.c" #include "u_bam.c" #include "u_rmnet_ctrl_smd.c" +#include "u_ctrl_hsic.c" +#include "u_data_hsic.c" #include "f_serial.c" -#ifdef CONFIG_USB_ANDROID_ACM #include "f_acm.c" -#endif #include "f_adb.c" -#if 0 #include "f_ccid.c" -#endif -#ifdef CONFIG_USB_ANDROID_MTP #include "f_mtp.c" -#endif #include "f_accessory.c" #define USB_ETH_RNDIS y -#ifdef CONFIG_USB_ANDROID_RNDIS #include "f_rndis.c" #include "rndis.c" -#endif -#ifdef CONFIG_USB_ANDROID_ECM -#include "f_ecm.c" -#endif #include "u_ether.c" -#ifdef CONFIG_USB_ANDROID_PROJECTOR -#include "f_projector.c" -#endif -#ifdef CONFIG_USB_ANDROID_USBNET -#include "f_usbnet.c" -#endif -#include - -#ifdef pr_debug -#undef pr_debug -#endif -#define pr_debug(fmt, args...) \ - printk(KERN_DEBUG "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_err -#undef pr_err -#endif -#define pr_err(fmt, args...) \ - printk(KERN_ERR "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_warning -#undef pr_warning -#endif -#define pr_warning(fmt, args...) \ - printk(KERN_WARNING "[USB] " pr_fmt(fmt), ## args) - -#ifdef pr_info -#undef pr_info -#endif -#define pr_info(fmt, args...) 
\ - printk(KERN_INFO "[USB] " pr_fmt(fmt), ## args) MODULE_AUTHOR("Mike Lockwood"); MODULE_DESCRIPTION("Android Composite USB Driver"); @@ -130,8 +82,6 @@ static const char longname[] = "Gadget Android"; #define VENDOR_ID 0x18D1 #define PRODUCT_ID 0x0001 -static bool connect2pc; - struct android_usb_function { char *name; void *config; @@ -152,14 +102,10 @@ struct android_usb_function { /* Optional: called when the configuration is removed */ void (*unbind_config)(struct android_usb_function *, struct usb_configuration *); - /* Optional: handle ctrl requests before the device is configured - * and/or before the function is enabled */ + /* Optional: handle ctrl requests before the device is configured */ int (*ctrlrequest)(struct android_usb_function *, struct usb_composite_dev *, const struct usb_ctrlrequest *); - - /* for performance requirement */ - int performance_lock; }; struct android_dev { @@ -173,34 +119,13 @@ struct android_dev { bool connected; bool sw_connected; struct work_struct work; - struct delayed_work init_work; - /* waiting for enabling functions */ - struct list_head function_list; - - int num_products; - struct android_usb_product *products; - int num_functions; - char **in_house_functions; - - int product_id; - void (*enable_fast_charge)(bool enable); - bool RndisDisableMPDecision; - int (*match)(int product_id, int intrsharing); }; static struct class *android_class; static struct android_dev *_android_dev; - -static struct wake_lock android_usb_idle_wake_lock; -#ifdef CONFIG_PERFLOCK -static struct perf_lock android_usb_perf_lock; -#endif - - static int android_bind_config(struct usb_configuration *c); static void android_unbind_config(struct usb_configuration *c); - /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 @@ -247,6 +172,12 @@ static struct usb_configuration android_config_driver = { .bMaxPower = 0xFA, /* 500ma */ }; +enum android_device_state { + USB_DISCONNECTED, + USB_CONNECTED, + USB_CONFIGURED, +}; + static void android_work(struct work_struct *data) { struct android_dev *dev = container_of(data, struct android_dev, work); @@ -254,119 +185,57 @@ static void android_work(struct work_struct *data) char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL }; char *connected[2] = { "USB_STATE=CONNECTED", NULL }; char *configured[2] = { "USB_STATE=CONFIGURED", NULL }; + char **uevent_envp = NULL; + static enum android_device_state last_uevent, next_state; unsigned long flags; - struct android_usb_function *f; - int count = 0; - - /* release performance related locks first */ - if (wake_lock_active(&android_usb_idle_wake_lock)) - wake_unlock(&android_usb_idle_wake_lock); -#ifdef CONFIG_PERFLOCK - if (is_perf_lock_active(&android_usb_perf_lock)) - perf_unlock(&android_usb_perf_lock); -#endif - spin_lock_irqsave(&cdev->lock, flags); if (cdev->config) { - spin_unlock_irqrestore(&cdev->lock, flags); - kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, - configured); - pr_info("USB_STATE=CONFIGURED"); - - /* hold perflock, wakelock for performance consideration */ - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - if (f->performance_lock) { - pr_info("Performance lock for '%s'\n", f->name); - count++; - } - } - if (count) { - if (!wake_lock_active(&android_usb_idle_wake_lock)) - wake_lock(&android_usb_idle_wake_lock); -#ifdef CONFIG_PERFLOCK - if (!is_perf_lock_active(&android_usb_perf_lock)) - perf_lock(&android_usb_perf_lock); -#endif - } - - if (!connect2pc && dev->connected) { - connect2pc 
= true; - switch_set_state(&cdev->sw_connect2pc, 1); - pr_info("set usb_connect2pc = 1\n"); - } - return; + uevent_envp = configured; + next_state = USB_CONFIGURED; + } else if (dev->connected != dev->sw_connected) { + uevent_envp = dev->connected ? connected : disconnected; + next_state = dev->connected ? USB_CONNECTED : USB_DISCONNECTED; } + dev->sw_connected = dev->connected; + spin_unlock_irqrestore(&cdev->lock, flags); - if (dev->connected != dev->sw_connected) { - dev->sw_connected = dev->connected; - spin_unlock_irqrestore(&cdev->lock, flags); - kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, - dev->sw_connected ? connected : disconnected); + if (uevent_envp) { + /* + * Some userspace modules, e.g. MTP, work correctly only if + * CONFIGURED uevent is preceded by DISCONNECT uevent. + * Check if we missed sending out a DISCONNECT uevent. This can + * happen if host PC resets and configures device really quick. + */ + if (((uevent_envp == connected) && + (last_uevent != USB_DISCONNECTED)) || + ((uevent_envp == configured) && + (last_uevent == USB_CONFIGURED))) { + pr_info("%s: sent missed DISCONNECT event\n", __func__); + kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, + disconnected); + msleep(20); + } + /* + * Before sending out CONFIGURED uevent give function drivers + * a chance to wakeup userspace threads and notify disconnect + */ + if (uevent_envp == configured) + msleep(50); - pr_info("%s\n", dev->connected ? connected[0] : disconnected[0]); + kobject_uevent_env(&dev->dev->kobj, KOBJ_CHANGE, uevent_envp); + last_uevent = next_state; + pr_info("%s: sent uevent %s\n", __func__, uevent_envp[0]); } else { - spin_unlock_irqrestore(&cdev->lock, flags); - } - - if (connect2pc && !dev->connected) { - connect2pc = false; - switch_set_state(&cdev->sw_connect2pc, 0); - pr_info("set usb_connect2pc = 0\n"); + pr_info("%s: did not send uevent (%d %d %p)\n", __func__, + dev->connected, dev->sw_connected, cdev->config); } } /*-------------------------------------------------------------------------*/ /* Supported functions initialization */ -static ssize_t func_en_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct android_usb_function *func = dev_get_drvdata(dev); - struct android_usb_function *f; - int ebl = 0; - - list_for_each_entry(f, &_android_dev->enabled_functions, enabled_list) { - if (!strcmp(func->name, f->name)) { - ebl = 1; - break; - } - } - return sprintf(buf, "%d", ebl); -} - -static ssize_t func_en_store( - struct device *dev, struct device_attribute *attr, - const char *buf, size_t size) -{ - struct android_usb_function *func = dev_get_drvdata(dev); - struct android_usb_function *f; - int ebl = 0; - int value; - - sscanf(buf, "%d", &value); - list_for_each_entry(f, &_android_dev->enabled_functions, enabled_list) { - if (!strcmp(func->name, f->name)) { - ebl = 1; - break; - } - } - if (!!value == ebl) { - pr_info("%s function is already %s\n", func->name - , ebl ? 
"enable" : "disable"); - return size; - } - - if (value) - htc_usb_enable_function(func->name, 1); - else - htc_usb_enable_function(func->name, 0); - - return size; -} -static DEVICE_ATTR(on, S_IRUGO | S_IWUSR | S_IWGRP, func_en_show, func_en_store); -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) /* RMNET_SMD */ static int rmnet_smd_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) @@ -375,11 +244,9 @@ static int rmnet_smd_function_bind_config(struct android_usb_function *f, } static struct android_usb_function rmnet_smd_function = { - .name = "rmnet", + .name = "rmnet_smd", .bind_config = rmnet_smd_function_bind_config, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) /* RMNET_SDIO */ static int rmnet_sdio_function_bind_config(struct android_usb_function *f, @@ -389,12 +256,10 @@ static int rmnet_sdio_function_bind_config(struct android_usb_function *f, } static struct android_usb_function rmnet_sdio_function = { - .name = "rmnet", + .name = "rmnet_sdio", .bind_config = rmnet_sdio_function_bind_config, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) /* RMNET_SMD_SDIO */ static int rmnet_smd_sdio_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) @@ -417,22 +282,16 @@ static struct device_attribute *rmnet_smd_sdio_attributes[] = { &dev_attr_transport, NULL }; static struct android_usb_function rmnet_smd_sdio_function = { - .name = "rmnet", + .name = "rmnet_smd_sdio", .init = rmnet_smd_sdio_function_init, .cleanup = rmnet_smd_sdio_function_cleanup, .bind_config = rmnet_smd_sdio_bind_config, .attributes = rmnet_smd_sdio_attributes, - .performance_lock = 1, }; -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) -/* RMNET - used with BAM */ -#define MAX_RMNET_INSTANCES 1 -static int rmnet_instances = 1; -static int rmnet_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return frmnet_init_port(MAX_RMNET_INSTANCES); -} + +/*rmnet transport string format(per port):"ctrl0,data0,ctrl1,data1..." 
*/ +#define MAX_XPORT_STR_LEN 50 +static char rmnet_transports[MAX_XPORT_STR_LEN]; static void rmnet_function_cleanup(struct android_usb_function *f) { @@ -443,52 +302,78 @@ static int rmnet_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { int i; - int ret = 0; + int err = 0; + char *ctrl_name; + char *data_name; + char buf[MAX_XPORT_STR_LEN], *b; + static int rmnet_initialized, ports; + + if (!rmnet_initialized) { + rmnet_initialized = 1; + strlcpy(buf, rmnet_transports, sizeof(buf)); + b = strim(buf); + while (b) { + ctrl_name = strsep(&b, ","); + data_name = strsep(&b, ","); + if (ctrl_name && data_name) { + err = frmnet_init_port(ctrl_name, data_name); + if (err) { + pr_err("rmnet: Cannot open ctrl port:" + "'%s' data port:'%s'\n", + ctrl_name, data_name); + goto out; + } + ports++; + } + } - for (i = 0; i < rmnet_instances; i++) { - ret = frmnet_bind_config(c, i); - if (ret) { + err = rmnet_gport_setup(); + if (err) { + pr_err("rmnet: Cannot setup transports"); + goto out; + } + } + + for (i = 0; i < ports; i++) { + err = frmnet_bind_config(c, i); + if (err) { pr_err("Could not bind rmnet%u config\n", i); break; } } - - return ret; +out: + return err; } -static ssize_t rmnet_instances_show(struct device *dev, +static ssize_t rmnet_transports_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", rmnet_instances); + return snprintf(buf, PAGE_SIZE, "%s\n", rmnet_transports); } -static ssize_t rmnet_instances_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t rmnet_transports_store( + struct device *device, struct device_attribute *attr, + const char *buff, size_t size) { - int value; + strlcpy(rmnet_transports, buff, sizeof(rmnet_transports)); - pr_info("%s, buff: %s\n", __func__, buf); - sscanf(buf, "%d", &value); - if (value > MAX_RMNET_INSTANCES) - value = MAX_RMNET_INSTANCES; - rmnet_instances = value; return size; } -static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, rmnet_instances_show, - rmnet_instances_store); +static struct device_attribute dev_attr_rmnet_transports = + __ATTR(transports, S_IRUGO | S_IWUSR, + rmnet_transports_show, + rmnet_transports_store); static struct device_attribute *rmnet_function_attributes[] = { - &dev_attr_instances, NULL }; + &dev_attr_rmnet_transports, + NULL }; static struct android_usb_function rmnet_function = { .name = "rmnet", - .init = rmnet_function_init, .cleanup = rmnet_function_cleanup, .bind_config = rmnet_function_bind_config, .attributes = rmnet_function_attributes, - .performance_lock = 1, }; -#endif /* DIAG */ static char diag_clients[32]; /*enabled DIAG clients- "diag[,diag_mdm]" */ @@ -496,7 +381,6 @@ static ssize_t clients_store( struct device *device, struct device_attribute *attr, const char *buff, size_t size) { - pr_info("%s, buff: %s\n", __func__, buff); strlcpy(diag_clients, buff, sizeof(diag_clients)); return size; @@ -520,16 +404,6 @@ static void diag_function_cleanup(struct android_usb_function *f) static int diag_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { -#if 1 - int err; - int (*notify)(uint32_t, const char *); - - notify = _android_dev->pdata->update_pid_and_serial_num; - - err = diag_function_add(c, "diag", notify); - if (err) - pr_err("diag: Cannot open channel 'diag'"); -#else char *name; char buf[32], *b; int once = 0, err = -1; @@ -551,7 +425,7 @@ static int diag_function_bind_config(struct android_usb_function *f, pr_err("diag: 
Cannot open channel '%s'", name); } } -#endif + return err; } @@ -563,149 +437,65 @@ static struct android_usb_function diag_function = { .attributes = diag_function_attributes, }; -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) -static int diag_mdm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) +/* SERIAL */ +static char serial_transports[32]; /*enabled FSERIAL ports - "tty[,sdio]"*/ +static ssize_t serial_transports_store( + struct device *device, struct device_attribute *attr, + const char *buff, size_t size) { - int err; - int (*notify)(uint32_t, const char *); + strlcpy(serial_transports, buff, sizeof(serial_transports)); - notify = NULL; + return size; +} - err = diag_function_add(c, "diag_mdm", notify); - if (err) - pr_err("diag: Cannot open channel 'diag_mdm'"); +static DEVICE_ATTR(transports, S_IWUSR, NULL, serial_transports_store); +static struct device_attribute *serial_function_attributes[] = + { &dev_attr_transports, NULL }; - return 0; +static void serial_function_cleanup(struct android_usb_function *f) +{ + gserial_cleanup(); } -static struct android_usb_function diag_mdm_function = { - .name = "diag_mdm", - .bind_config = diag_mdm_function_bind_config, -}; -#endif - -/* Serial, Modem */ -static int serial_driver_initial(struct usb_configuration *c) +static int serial_function_bind_config(struct android_usb_function *f, + struct usb_configuration *c) { - char *name, *str[2]; - char buf[80], *b; - int err = -1; + char *name; + char buf[32], *b; + int err = -1, i; static int serial_initialized = 0, ports = 0; - char *init_string; - - if (serial_initialized) { - pr_info("%s: already initial\n", __func__); - return ports; - } - serial_initialized = 1; - - init_string = _android_dev->pdata->fserial_init_string ? 
- _android_dev->pdata->fserial_init_string : - "smd:modem,tty,tty,tty:serial"; - strncpy(buf, init_string, sizeof(buf)); - buf[79] = 0; - pr_info("%s: init string: %s\n", __func__, buf); + if (serial_initialized) + goto bind_config; + serial_initialized = 1; + strlcpy(buf, serial_transports, sizeof(buf)); b = strim(buf); while (b) { - str[0] = str[1] = 0; name = strsep(&b, ","); + if (name) { - str[0] = strsep(&name, ":"); - if (str[0]) - str[1] = strsep(&name, ":"); - } - err = gserial_init_port(ports, str[0], str[1]); - if (err) { - pr_err("serial: Cannot open port '%s'\n", str[0]); - goto out; + err = gserial_init_port(ports, name); + if (err) { + pr_err("serial: Cannot open port '%s'", name); + goto out; + } + ports++; } - ports++; } - err = gport_setup(c); if (err) { pr_err("serial: Cannot setup transports"); goto out; } - return ports; -out: - return err; -} - -/* Modem */ -static void modem_function_cleanup(struct android_usb_function *f) -{ - struct android_dev *dev = _android_dev; - - /* ToDo: need to cleanup by different channel */ - gsmd_cleanup(dev->cdev->gadget, 1); -} - -static int modem_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int err = -1; - int i, ports; - - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - - - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_MODEM) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } - } - } - -out: - return err; -} - -static struct android_usb_function modem_function = { - .name = "modem", - .cleanup = modem_function_cleanup, - .bind_config = modem_function_bind_config, - .performance_lock = 1, -}; - -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM -/* Modem_Mdm */ -static void modem_mdm_function_cleanup(struct android_usb_function *f) -{ - struct android_dev *dev = _android_dev; - - /* ToDo: need to cleanup by different channel */ - gsmd_cleanup(dev->cdev->gadget, 1); -} - -static int modem_mdm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int err = -1; - int i, ports; - - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - - - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_MODEM_MDM) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } +bind_config: + for (i = 0; i < ports; i++) { + err = gser_bind_config(c, i); + if (err) { + pr_err("serial: bind_config failed for port %d", i); + goto out; } } @@ -713,103 +503,83 @@ static int modem_mdm_function_bind_config(struct android_usb_function *f, return err; } -static struct android_usb_function modem_mdm_function = { - .name = "modem_mdm", - .cleanup = modem_mdm_function_cleanup, - .bind_config = modem_mdm_function_bind_config, - .performance_lock = 1, +static struct android_usb_function serial_function = { + .name = "serial", + .cleanup = serial_function_cleanup, + .bind_config = serial_function_bind_config, + .attributes = serial_function_attributes, }; -#endif -/* SERIAL */ -static char serial_transports[32]; /*enabled FSERIAL ports - "tty[,sdio]"*/ -static ssize_t serial_transports_store( +/* ACM */ +static char acm_transports[32]; /*enabled ACM ports - "tty[,sdio]"*/ +static ssize_t acm_transports_store( struct device *device, struct device_attribute *attr, const char *buff, size_t size) { - pr_info("%s: %s\n", __func__, buff); - strlcpy(serial_transports, buff, sizeof(serial_transports)); 
+ strlcpy(acm_transports, buff, sizeof(acm_transports)); return size; } -static DEVICE_ATTR(transports, S_IWUSR, NULL, serial_transports_store); -static struct device_attribute *serial_function_attributes[] = - { &dev_attr_transports, NULL }; +static DEVICE_ATTR(acm_transports, S_IWUSR, NULL, acm_transports_store); +static struct device_attribute *acm_function_attributes[] = { + &dev_attr_acm_transports, NULL }; -static void serial_function_cleanup(struct android_usb_function *f) +static void acm_function_cleanup(struct android_usb_function *f) { gserial_cleanup(); } -static int serial_function_bind_config(struct android_usb_function *f, +static int acm_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) { -#if 1 - int err = -1; - int i, ports; - - ports = serial_driver_initial(c); - if (ports < 0) - goto out; - for (i = 0; i < ports; i++) { - if (gserial_ports[i].func_type == USB_FSER_FUNC_SERIAL) { - err = gser_bind_config(c, i); - if (err) { - pr_err("serial: bind_config failed for port %d", i); - goto out; - } - } - } -#else char *name; char buf[32], *b; int err = -1, i; - static int serial_initialized = 0, ports = 0; + static int acm_initialized, ports; - if (serial_initialized) + if (acm_initialized) goto bind_config; - serial_initialized = 1; - strlcpy(buf, serial_transports, sizeof(buf)); + acm_initialized = 1; + strlcpy(buf, acm_transports, sizeof(buf)); b = strim(buf); while (b) { name = strsep(&b, ","); + if (name) { - err = gserial_init_port(ports, name); + err = acm_init_port(ports, name); if (err) { - pr_err("serial: Cannot open port '%s'", name); + pr_err("acm: Cannot open port '%s'", name); goto out; } ports++; } } - err = gport_setup(c); + err = acm_port_setup(c); if (err) { - pr_err("serial: Cannot setup transports"); + pr_err("acm: Cannot setup transports"); goto out; } bind_config: for (i = 0; i < ports; i++) { - err = gser_bind_config(c, i); + err = acm_bind_config(c, i); if (err) { - pr_err("serial: bind_config failed for port %d", i); + pr_err("acm: bind_config failed for port %d", i); goto out; } } -#endif out: return err; } - -static struct android_usb_function serial_function = { - .name = "serial", - .cleanup = serial_function_cleanup, - .bind_config = serial_function_bind_config, - .attributes = serial_function_attributes, +static struct android_usb_function acm_function = { + .name = "acm", + .cleanup = acm_function_cleanup, + .bind_config = acm_function_bind_config, + .attributes = acm_function_attributes, }; /* ADB */ @@ -836,7 +606,6 @@ static struct android_usb_function adb_function = { }; /* CCID */ -#if 0 static int ccid_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { @@ -857,88 +626,10 @@ static int ccid_function_bind_config(struct android_usb_function *f, static struct android_usb_function ccid_function = { .name = "ccid", .init = ccid_function_init, - .cleanup = ccid_function_cleanup, - .bind_config = ccid_function_bind_config, -}; -#endif - -#ifdef CONFIG_USB_ANDROID_ACM -#define MAX_ACM_INSTANCES 4 -struct acm_function_config { - int instances; -}; - -static int acm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) -{ - struct acm_function_config *config; - f->config = kzalloc(sizeof(struct acm_function_config), GFP_KERNEL); - if (!f->config) - return -ENOMEM; - - config = f->config; - config->instances = 1; - return gserial_setup(cdev->gadget, MAX_ACM_INSTANCES); -} - -static void acm_function_cleanup(struct android_usb_function *f) -{ - gserial_cleanup(); - 
kfree(f->config); - f->config = NULL; -} - -static int acm_function_bind_config(struct android_usb_function *f, struct usb_configuration *c) -{ - int i; - int ret = 0; - struct acm_function_config *config = f->config; - - for (i = 0; i < config->instances; i++) { - ret = acm_bind_config(c, i); - if (ret) { - pr_err("Could not bind acm%u config\n", i); - break; - } - } - - return ret; -} - -static ssize_t acm_instances_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct acm_function_config *config = f->config; - return sprintf(buf, "%d\n", config->instances); -} - -static ssize_t acm_instances_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct acm_function_config *config = f->config; - int value; - - sscanf(buf, "%d", &value); - if (value > MAX_ACM_INSTANCES) - value = MAX_ACM_INSTANCES; - config->instances = value; - return size; -} - -static DEVICE_ATTR(instances, S_IRUGO | S_IWUSR, acm_instances_show, acm_instances_store); -static struct device_attribute *acm_function_attributes[] = { &dev_attr_instances, NULL }; - -static struct android_usb_function acm_function = { - .name = "acm", - .init = acm_function_init, - .cleanup = acm_function_cleanup, - .bind_config = acm_function_bind_config, - .attributes = acm_function_attributes, + .cleanup = ccid_function_cleanup, + .bind_config = ccid_function_bind_config, }; -#endif -#ifdef CONFIG_USB_ANDROID_MTP static int mtp_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { return mtp_setup(); @@ -992,108 +683,8 @@ static struct android_usb_function ptp_function = { .cleanup = ptp_function_cleanup, .bind_config = ptp_function_bind_config, }; -#endif - -#ifdef CONFIG_USB_ANDROID_ECM -/* ECM */ -struct ecm_function_config { - u8 ethaddr[ETH_ALEN]; -}; - -static int ecm_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) -{ - struct ecm_function_config *ecm; - f->config = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL); - if (!f->config) - return -ENOMEM; - - ecm = f->config; - return 0; -} - -static void ecm_function_cleanup(struct android_usb_function *f) -{ - kfree(f->config); - f->config = NULL; -} - -static int ecm_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - int ret; - struct ecm_function_config *ecm = f->config; - - if (!ecm) { - pr_err("%s: ecm_pdata\n", __func__); - return -1; - } - - - pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__, - ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2], - ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]); - - ret = gether_setup_name(c->cdev->gadget, ecm->ethaddr, "usb"); - if (ret) { - pr_err("%s: gether_setup failed\n", __func__); - return ret; - } - - - return ecm_bind_config(c, ecm->ethaddr); -} - -static void ecm_function_unbind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - gether_cleanup(); -} - -static ssize_t ecm_ethaddr_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct ecm_function_config *ecm = f->config; - return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", - ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2], - ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]); -} - -static ssize_t ecm_ethaddr_store(struct device *dev, - struct device_attribute *attr, const char *buf, 
size_t size) -{ - struct android_usb_function *f = dev_get_drvdata(dev); - struct ecm_function_config *ecm = f->config; - - if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", - (int *)&ecm->ethaddr[0], (int *)&ecm->ethaddr[1], - (int *)&ecm->ethaddr[2], (int *)&ecm->ethaddr[3], - (int *)&ecm->ethaddr[4], (int *)&ecm->ethaddr[5]) == 6) - return size; - return -EINVAL; -} - -static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show, - ecm_ethaddr_store); - -static struct device_attribute *ecm_function_attributes[] = { - &dev_attr_ecm_ethaddr, - NULL -}; -static struct android_usb_function ecm_function = { - .name = "cdc_ethernet", - .init = ecm_function_init, - .cleanup = ecm_function_cleanup, - .bind_config = ecm_function_bind_config, - .unbind_config = ecm_function_unbind_config, - .attributes = ecm_function_attributes, - .performance_lock = 1, -}; -#endif -#ifdef CONFIG_USB_ANDROID_RNDIS -/* RNDIS */ struct rndis_function_config { u8 ethaddr[ETH_ALEN]; u32 vendorID; @@ -1103,18 +694,9 @@ struct rndis_function_config { static int rndis_function_init(struct android_usb_function *f, struct usb_composite_dev *cdev) { - struct rndis_function_config *rndis; - struct android_dev *dev = _android_dev; - f->config = kzalloc(sizeof(struct rndis_function_config), GFP_KERNEL); if (!f->config) return -ENOMEM; - - rndis = f->config; - - strncpy(rndis->manufacturer, dev->pdata->manufacturer_name, sizeof(rndis->manufacturer)); - rndis->vendorID = dev->pdata->vendor_id; - return 0; } @@ -1139,7 +721,7 @@ static int rndis_function_bind_config(struct android_usb_function *f, rndis->ethaddr[0], rndis->ethaddr[1], rndis->ethaddr[2], rndis->ethaddr[3], rndis->ethaddr[4], rndis->ethaddr[5]); - ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "usb"); + ret = gether_setup_name(c->cdev->gadget, rndis->ethaddr, "rndis"); if (ret) { pr_err("%s: gether_setup failed\n", __func__); return ret; @@ -1283,9 +865,8 @@ static struct android_usb_function rndis_function = { .bind_config = rndis_function_bind_config, .unbind_config = rndis_function_unbind_config, .attributes = rndis_function_attributes, - .performance_lock = 1, }; -#endif + struct mass_storage_function_config { struct fsg_config fsg; @@ -1306,21 +887,14 @@ static int mass_storage_function_init(struct android_usb_function *f, if (!config) return -ENOMEM; - if (dev->pdata->nluns) { config->fsg.nluns = dev->pdata->nluns; if (config->fsg.nluns > FSG_MAX_LUNS) config->fsg.nluns = FSG_MAX_LUNS; for (i = 0; i < config->fsg.nluns; i++) { - if (dev->pdata->cdrom_lun & (1 << i)) { - config->fsg.luns[i].cdrom = 1; - config->fsg.luns[i].removable = 1; - config->fsg.luns[i].ro = 1; - } else { - config->fsg.luns[i].cdrom = 0; - config->fsg.luns[i].removable = 1; - config->fsg.luns[i].ro = 0; - } + config->fsg.luns[i].cdrom = 0; + config->fsg.luns[i].removable = 1; + config->fsg.luns[i].ro = 0; } } else { /* default value */ @@ -1328,9 +902,6 @@ static int mass_storage_function_init(struct android_usb_function *f, config->fsg.luns[0].removable = 1; } - config->fsg.vendor_name = dev->pdata->manufacturer_name; - config->fsg.product_name= dev->pdata->product_name; - common = fsg_common_init(NULL, cdev, &config->fsg); if (IS_ERR(common)) { kfree(config); @@ -1436,109 +1007,23 @@ static struct android_usb_function accessory_function = { .ctrlrequest = accessory_function_ctrlrequest, }; -#ifdef CONFIG_USB_ANDROID_PROJECTOR -static int projector_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return projector_setup(); -} - 
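The removed blocks above and below (ECM, projector, usbnet, the old ACM instance handling) and the functions that survive all share one shape: each USB function is wrapped in a struct android_usb_function whose optional callbacks the android composite driver invokes at init, bind_config and cleanup time, and the struct is then listed in supported_functions[] so it can be selected through the sysfs interface. As a reference point, a bare-bones entry would look roughly like the sketch below; "demo" and its callbacks are invented for illustration and are not part of this patch:

/* Illustration only: a minimal android_usb_function, not code from this patch. */
static int demo_function_bind_config(struct android_usb_function *f,
				     struct usb_configuration *c)
{
	/* add this function's usb_function(s) to the configuration here */
	return 0;
}

static void demo_function_cleanup(struct android_usb_function *f)
{
	/* undo whatever .init allocated (nothing in this demo) */
}

static struct android_usb_function demo_function = {
	.name		= "demo",
	.cleanup	= demo_function_cleanup,
	.bind_config	= demo_function_bind_config,
	/* optional: .init, .unbind_config, .attributes, .ctrlrequest */
};

/* ...and &demo_function would be added to supported_functions[] so that
 * writing "demo" to the "functions" attribute enables it. */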
-static void projector_function_cleanup(struct android_usb_function *f) -{ - kfree(f->config); - f->config = NULL; -} - -static int projector_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - return projector_bind_config(c); -} - -struct android_usb_function projector_function = { - .name = "projector", - .init = projector_function_init, - .cleanup = projector_function_cleanup, - .bind_config = projector_function_bind_config, -}; -#endif - -#ifdef CONFIG_USB_ANDROID_USBNET -static int usbnet_function_init(struct android_usb_function *f, - struct usb_composite_dev *cdev) -{ - return usbnet_setup(); -} - -static int usbnet_function_bind_config(struct android_usb_function *f, - struct usb_configuration *c) -{ - return usbnet_bind_config(c); -} - -static int usbnet_function_ctrlrequest(struct android_usb_function *f, - struct usb_composite_dev *cdev, - const struct usb_ctrlrequest *c) -{ - return usbnet_ctrlrequest(cdev, c); -} - -struct android_usb_function usbnet_function = { - .name = "usbnet", - .init = usbnet_function_init, - .bind_config = usbnet_function_bind_config, - .ctrlrequest = usbnet_function_ctrlrequest, -}; -#endif static struct android_usb_function *supported_functions[] = { -#if 1 -#ifdef CONFIG_USB_ANDROID_RNDIS - &rndis_function, -#endif - &accessory_function, -#ifdef CONFIG_USB_ANDROID_MTP - &mtp_function, - &ptp_function, -#endif - &mass_storage_function, - &adb_function, -#ifdef CONFIG_USB_ANDROID_ECM - &ecm_function, -#endif - &diag_function, - - &modem_function, -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - &modem_mdm_function, -#endif - &serial_function, -#ifdef CONFIG_USB_ANDROID_PROJECTOR - &projector_function, -#endif -#ifdef CONFIG_USB_ANDROID_ACM - &acm_function, -#endif -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - &diag_mdm_function, -#endif -#if defined(CONFIG_USB_ANDROID_RMNET_SMD) &rmnet_smd_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_SDIO) &rmnet_sdio_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_SMD_SDIO) &rmnet_smd_sdio_function, -#elif defined(CONFIG_USB_ANDROID_RMNET_BAM) &rmnet_function, -#endif -#if 0 + &diag_function, + &serial_function, + &adb_function, &ccid_function, -#endif -#ifdef CONFIG_USB_ANDROID_USBNET - &usbnet_function, -#endif + &acm_function, + &mtp_function, + &ptp_function, + &rndis_function, + &mass_storage_function, + &accessory_function, NULL -#endif }; @@ -1563,13 +1048,6 @@ static int android_init_functions(struct android_usb_function **functions, goto err_create; } - if (device_create_file(f->dev, &dev_attr_on) < 0) { - pr_err("%s: Failed to create dev file %s", __func__, - f->dev_name); - goto err_create; - } - - if (f->init) { err = f->init(f, cdev); if (err) { @@ -1589,7 +1067,6 @@ static int android_init_functions(struct android_usb_function **functions, __func__, f->name); goto err_out; } - pr_info("%s %s init\n", __func__, f->name); } return 0; @@ -1617,15 +1094,14 @@ static void android_cleanup_functions(struct android_usb_function **functions) } } - static int +static int android_bind_enabled_functions(struct android_dev *dev, - struct usb_configuration *c) + struct usb_configuration *c) { struct android_usb_function *f; int ret; list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - pr_info("%s bind name: %s\n", __func__, f->name); ret = f->bind_config(f, c); if (ret) { pr_err("%s: %s failed", __func__, f->name); @@ -1653,18 +1129,42 @@ static int android_enable_function(struct android_dev *dev, char *name) struct android_usb_function *f; while ((f = *functions++)) { 
if (!strcmp(name, f->name)) { - pr_info("%s: %s enabled\n", __func__, name); list_add_tail(&f->enabled_list, &dev->enabled_functions); return 0; } } - pr_info("%s: %s failed\n", __func__, name); return -EINVAL; } /*-------------------------------------------------------------------------*/ /* /sys/class/android_usb/android%d/ interface */ +static ssize_t remote_wakeup_show(struct device *pdev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + !!(android_config_driver.bmAttributes & + USB_CONFIG_ATT_WAKEUP)); +} + +static ssize_t remote_wakeup_store(struct device *pdev, + struct device_attribute *attr, const char *buff, size_t size) +{ + int enable = 0; + + sscanf(buff, "%d", &enable); + + pr_debug("android_usb: %s remote wakeup\n", + enable ? "enabling" : "disabling"); + + if (enable) + android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; + else + android_config_driver.bmAttributes &= ~USB_CONFIG_ATT_WAKEUP; + + return size; +} + static ssize_t functions_show(struct device *pdev, struct device_attribute *attr, char *buf) { @@ -1679,7 +1179,6 @@ functions_show(struct device *pdev, struct device_attribute *attr, char *buf) return buff - buf; } -/* TODO: replace by switch function and enable function */ static ssize_t functions_store(struct device *pdev, struct device_attribute *attr, const char *buff, size_t size) @@ -1689,9 +1188,6 @@ functions_store(struct device *pdev, struct device_attribute *attr, char buf[256], *b; int err; - pr_info("%s: buff: %s\n", __func__, buff); - return size; - INIT_LIST_HEAD(&dev->enabled_functions); strlcpy(buf, buff, sizeof(buf)); @@ -1724,16 +1220,6 @@ static ssize_t enable_store(struct device *pdev, struct device_attribute *attr, int enabled = 0; sscanf(buff, "%d", &enabled); - - if (enabled) - htc_usb_enable_function("adb", 1); - - pr_info("%s, buff: %s\n", __func__, buff); - - /* temporaily return immediately to prevent framework change usb behavior - */ - return size; - if (enabled && !dev->enabled) { /* update values in composite driver's copy of device descriptor */ cdev->desc.idVendor = device_desc.idVendor; @@ -1793,7 +1279,6 @@ field ## _store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ int value; \ - pr_info("%s: %s\n", __func__, buf); \ if (sscanf(buf, format_string, &value) == 1) { \ device_desc.field = value; \ return size; \ @@ -1813,7 +1298,6 @@ static ssize_t \ field ## _store(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ - pr_info("%s: %s\n", __func__, buf); \ if (size >= sizeof(buffer)) return -EINVAL; \ if (sscanf(buf, "%255s", buffer) == 1) { \ return size; \ @@ -1836,6 +1320,8 @@ DESCRIPTOR_STRING_ATTR(iSerial, serial_string) static DEVICE_ATTR(functions, S_IRUGO | S_IWUSR, functions_show, functions_store); static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); static DEVICE_ATTR(state, S_IRUGO, state_show, NULL); +static DEVICE_ATTR(remote_wakeup, S_IRUGO | S_IWUSR, + remote_wakeup_show, remote_wakeup_store); static struct device_attribute *android_usb_attributes[] = { &dev_attr_idVendor, @@ -1850,10 +1336,13 @@ static struct device_attribute *android_usb_attributes[] = { &dev_attr_functions, &dev_attr_enable, &dev_attr_state, + &dev_attr_remote_wakeup, NULL }; +#ifdef CONFIG_USB_HTC_SWITCH_STUB #include "htc_attr.c" +#endif /*-------------------------------------------------------------------------*/ /* Composite driver */ @@ -1880,7 +1369,6 @@ static void 
android_unbind_config(struct usb_configuration *c) static int android_bind(struct usb_composite_dev *cdev) { struct android_dev *dev = _android_dev; - struct android_usb_platform_data *pdata = _android_dev->pdata; struct usb_gadget *gadget = cdev->gadget; int gcnum, id, ret; @@ -1905,22 +1393,11 @@ static int android_bind(struct usb_composite_dev *cdev) strings_dev[STRING_PRODUCT_IDX].id = id; device_desc.iProduct = id; - dev->products = pdata->products; - dev->num_products = pdata->num_products; - dev->in_house_functions = pdata->functions; - dev->num_functions = pdata->num_functions; - dev->match = pdata->match; - - /* default String */ - if (pdata->product_name) - strlcpy(product_string, pdata->product_name, - sizeof(product_string) - 1); - if (pdata->manufacturer_name) - strlcpy(manufacturer_string, pdata->manufacturer_name, - sizeof(manufacturer_string) - 1); - if (pdata->serial_number) - strlcpy(serial_string, pdata->serial_number, - sizeof(serial_string) - 1); + /* Default strings - should be updated by userspace */ + strlcpy(manufacturer_string, "Android", + sizeof(manufacturer_string) - 1); + strlcpy(product_string, "Android", sizeof(product_string) - 1); + strlcpy(serial_string, "0123456789ABCDEF", sizeof(serial_string) - 1); id = usb_string_id(cdev); if (id < 0) @@ -1947,14 +1424,6 @@ static int android_bind(struct usb_composite_dev *cdev) usb_gadget_set_selfpowered(gadget); dev->cdev = cdev; - - cdev->sw_connect2pc.name = "usb_connect2pc"; - ret = switch_dev_register(&cdev->sw_connect2pc); - if (ret < 0) - pr_err("switch_dev_register fail:usb_connect2pc\n"); - - - schedule_delayed_work(&dev->init_work, HZ); return 0; } @@ -1964,7 +1433,6 @@ static int android_usb_unbind(struct usb_composite_dev *cdev) cancel_work_sync(&dev->work); android_cleanup_functions(dev->functions); - switch_dev_unregister(&cdev->sw_connect2pc); return 0; } @@ -1975,101 +1443,6 @@ static struct usb_composite_driver android_usb_driver = { .unbind = android_usb_unbind, }; -#ifdef CONFIG_USB_ANDROID_USBNET -static struct work_struct reenumeration_work; -static void do_reenumeration_work(struct work_struct *w) -{ - struct android_dev *dev = _android_dev; - int err, funcs, product_id; - - if (dev->enabled != true) { - pr_info("%s: USB driver is not initialize\n", __func__); - return; - } - - mutex_lock(&function_bind_sem); - - funcs = htc_usb_get_func_combine_value(); - usb_gadget_disconnect(dev->cdev->gadget); - usb_remove_config(dev->cdev, &android_config_driver); - - INIT_LIST_HEAD(&dev->enabled_functions); - - if (funcs & (1 << USB_FUNCTION_ADB)) { - err = android_enable_function(dev, "adb"); - if (err) - pr_err("android_usb: Cannot enable '%s'", "adb"); - } - - err = android_enable_function(dev, "usbnet"); - if (err) - pr_err("android_usb: Cannot enable '%s'", "usbnet"); - - product_id = get_product_id(dev, &dev->enabled_functions); - - device_desc.idProduct = __constant_cpu_to_le16(product_id); - dev->cdev->desc.idProduct = device_desc.idProduct; - printk(KERN_INFO "%s:product_id = 0x%04x\n", __func__, product_id); - - usb_add_config(dev->cdev, &android_config_driver, android_bind_config); - mdelay(100); - usb_gadget_connect(dev->cdev->gadget); - dev->enabled = true; - - mutex_unlock(&function_bind_sem); -} - -static int handle_mode_switch(u16 switchIndex, struct usb_composite_dev *cdev) -{ - switch (switchIndex) { - case 0x1F: - /* Enable the USBNet function and disable all others but adb */ - printk(KERN_INFO "[USBNET] %s: 0x%02x\n", __func__, switchIndex); - cdev->desc.bDeviceClass = USB_CLASS_COMM; 
- break; - /* Add other switch functions */ - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int android_switch_setup(struct usb_gadget *gadget, - const struct usb_ctrlrequest *c) -{ - int value = -EOPNOTSUPP; - u16 wIndex = le16_to_cpu(c->wIndex); - u16 wValue = le16_to_cpu(c->wValue); - u16 wLength = le16_to_cpu(c->wLength); - struct usb_composite_dev *cdev = get_gadget_data(gadget); - struct usb_request *req = cdev->req; - /* struct android_dev *dev = _android_dev; */ - - switch (c->bRequestType & USB_TYPE_MASK) { - case USB_TYPE_VENDOR: - /* If the request is a mode switch , handle it */ - if ((c->bRequest == 1) && (wValue == 0) && (wLength == 0)) { - value = handle_mode_switch(wIndex, cdev); - if (value != 0) - return value; - - req->zero = 0; - req->length = value; - if (usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC)) - printk(KERN_ERR "ep0 in queue failed\n"); - - /* force reenumeration */ - schedule_work(&reenumeration_work); - } - break; - /* Add Other type of requests here */ - default: - break; - } - return value; -} -#endif - static int android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c) { @@ -2085,12 +1458,6 @@ android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c) req->length = 0; gadget->ep0->driver_data = cdev; -#ifdef CONFIG_USB_ANDROID_USBNET - value = android_switch_setup(gadget, c); - if (value >= 0) - return value; -#endif - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { if (f->ctrlrequest) { value = f->ctrlrequest(f, cdev, c); @@ -2158,16 +1525,30 @@ static int android_create_device(struct android_dev *dev) return 0; } +static void android_destroy_device(struct android_dev *dev) +{ + struct device_attribute **attrs = android_usb_attributes; + struct device_attribute *attr; + + while ((attr = *attrs++)) + device_remove_file(dev->dev, attr); + device_destroy(android_class, dev->dev->devt); +} + static int __devinit android_probe(struct platform_device *pdev) { struct android_usb_platform_data *pdata = pdev->dev.platform_data; struct android_dev *dev = _android_dev; - dev->pdata = pdata; +#ifdef CONFIG_USB_HTC_SWITCH_STUB + int err; + err = sysfs_create_group(&pdev->dev.kobj, &htc_attr_group); + if (err) { + pr_err("%s: failed to create HTC USB devices\n", __func__); + } +#endif - init_mfg_serialno(); - if (sysfs_create_group(&pdev->dev.kobj, &android_usb_attr_group)) - pr_err("%s: fail to create sysfs\n", __func__); + dev->pdata = pdata; return 0; } @@ -2176,134 +1557,63 @@ static struct platform_driver android_platform_driver = { .driver = { .name = "android_usb"}, }; -static void android_usb_init_work(struct work_struct *data) -{ - struct android_dev *dev = _android_dev; - struct android_usb_platform_data *pdata = dev->pdata; - struct usb_composite_dev *cdev = dev->cdev; - int ret = 0; - __u16 product_id; - - /* initial ums+adb by default */ - ret = android_enable_function(dev, "mass_storage"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "mass_storage"); - -#if 0 - ret = android_enable_function(dev, "adb"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "adb"); -#endif - - /* initial function depends on radio flag */ - if (pdata->diag_init) { - ret = android_enable_function(dev, "diag"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "diag"); - } - if (pdata->modem_init) { - ret = android_enable_function(dev, "modem"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "modem"); -#if defined(CONFIG_USB_ANDROID_MDM9K_MODEM) - ret = android_enable_function(dev, 
"modem_mdm"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "modem_mdm"); -#endif - } - -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - if (pdata->diag_init) { - ret = android_enable_function(dev, "diag_mdm"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "diag_mdm"); - } -#endif - - if (pdata->rmnet_init) { - ret = android_enable_function(dev, "rmnet"); - if (ret) - pr_err("android_usb: Cannot enable '%s'", "rmnet"); - } - - - cdev->desc.idVendor = __constant_cpu_to_le16(pdata->vendor_id), - product_id = get_product_id(dev, &dev->enabled_functions); - - if (dev->match) - product_id = dev->match(product_id, intrsharing); - - cdev->desc.idProduct = __constant_cpu_to_le16(product_id), - cdev->desc.bcdDevice = device_desc.bcdDevice; - cdev->desc.bDeviceClass = device_desc.bDeviceClass; - cdev->desc.bDeviceSubClass = device_desc.bDeviceSubClass; - cdev->desc.bDeviceProtocol = device_desc.bDeviceProtocol; - - device_desc.idVendor = cdev->desc.idVendor; - device_desc.idProduct = cdev->desc.idProduct; - - ret = usb_add_config(cdev, &android_config_driver, - android_bind_config); - - usb_gadget_connect(cdev->gadget); - dev->enabled = true; - pr_info("%s: ret: %d\n", __func__, ret); -} - static int __init init(void) { struct android_dev *dev; - int err; + int ret; - connect2pc = false; android_class = class_create(THIS_MODULE, "android_usb"); if (IS_ERR(android_class)) return PTR_ERR(android_class); dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + if (!dev) { + pr_err("%s(): Failed to alloc memory for android_dev\n", + __func__); + class_destroy(android_class); return -ENOMEM; - + } dev->functions = supported_functions; INIT_LIST_HEAD(&dev->enabled_functions); INIT_WORK(&dev->work, android_work); - INIT_DELAYED_WORK(&dev->init_work, android_usb_init_work); -#ifdef CONFIG_USB_ANDROID_USBNET - INIT_WORK(&reenumeration_work, do_reenumeration_work); -#endif - err = android_create_device(dev); - if (err) { - class_destroy(android_class); - kfree(dev); - return err; + ret = android_create_device(dev); + if (ret) { + pr_err("%s(): android_create_device failed\n", __func__); + goto err_dev; } - _android_dev = dev; - - wake_lock_init(&android_usb_idle_wake_lock, WAKE_LOCK_IDLE, - "android_usb_idle"); - -#ifdef CONFIG_PERFLOCK - perf_lock_init(&android_usb_perf_lock, PERF_LOCK_HIGHEST, "android_usb"); -#endif - /* Override composite driver functions */ composite_driver.setup = android_setup; composite_driver.disconnect = android_disconnect; - platform_driver_probe(&android_platform_driver, android_probe); + ret = platform_driver_probe(&android_platform_driver, android_probe); + if (ret) { + pr_err("%s(): Failed to register android" + "platform driver\n", __func__); + goto err_probe; + } + ret = usb_composite_probe(&android_usb_driver, android_bind); + if (ret) { + pr_err("%s(): Failed to register android" + "composite driver\n", __func__); + platform_driver_unregister(&android_platform_driver); + goto err_probe; + } + return ret; - return usb_composite_probe(&android_usb_driver, android_bind); +err_probe: + android_destroy_device(dev); +err_dev: + kfree(dev); + class_destroy(android_class); + return ret; } module_init(init); static void __exit cleanup(void) { - - wake_lock_destroy(&android_usb_idle_wake_lock); - usb_composite_unregister(&android_usb_driver); class_destroy(android_class); kfree(_android_dev); diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c index 3ece2d3332a..47505cedaff 100644 --- a/drivers/usb/gadget/ci13xxx_msm.c +++ 
b/drivers/usb/gadget/ci13xxx_msm.c @@ -14,13 +14,8 @@ #include #include #include -#include #include #include -#include - -#include -static struct usb_info *the_usb_info; #include "ci13xxx_udc.c" @@ -33,13 +28,13 @@ static irqreturn_t msm_udc_irq(int irq, void *data) static void ci13xxx_msm_notify_event(struct ci13xxx *udc, unsigned event) { - /* struct device *dev = udc->gadget.dev.parent; */ + struct device *dev = udc->gadget.dev.parent; switch (event) { case CI13XXX_CONTROLLER_RESET_EVENT: dev_dbg(dev, "CI13XXX_CONTROLLER_RESET_EVENT received\n"); writel(0, USB_AHBBURST); - writel(0, USB_AHBMODE); + writel_relaxed(0x08, USB_AHBMODE); break; default: dev_dbg(dev, "unknown ci13xxx_udc event\n"); @@ -52,8 +47,8 @@ static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { .flags = CI13XXX_REGS_SHARED | CI13XXX_REQUIRE_TRANSCEIVER | CI13XXX_PULLUP_ON_VBUS | - CI13XXX_DISABLE_STREAMING | - CI13XXX_ZERO_ITC, + CI13XXX_ZERO_ITC | + CI13XXX_DISABLE_STREAMING, .notify_event = ci13xxx_msm_notify_event, }; @@ -61,16 +56,11 @@ static struct ci13xxx_udc_driver ci13xxx_msm_udc_driver = { static int ci13xxx_msm_probe(struct platform_device *pdev) { struct resource *res; - struct usb_info *ui; void __iomem *regs; int irq; int ret; dev_dbg(&pdev->dev, "ci13xxx_msm_probe\n"); - ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); - if (!ui) - return -ENOMEM; - the_usb_info = ui; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { @@ -116,24 +106,8 @@ static int ci13xxx_msm_probe(struct platform_device *pdev) return ret; } -static void ci13xxx_msm_shutdown(struct platform_device *pdev) -{ - struct msm_otg *motg; - struct ci13xxx *udc = _udc; - - if (!udc || !udc->transceiver) - return; - - motg = container_of(udc->transceiver, struct msm_otg, otg); - - if (!atomic_read(&motg->in_lpm)) - ci13xxx_pullup(&udc->gadget, 0); - -} - static struct platform_driver ci13xxx_msm_driver = { .probe = ci13xxx_msm_probe, - .shutdown = ci13xxx_msm_shutdown, .driver = { .name = "msm_hsusb", }, }; diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c index d0f3cc32c5c..287c9933873 100644 --- a/drivers/usb/gadget/ci13xxx_udc.c +++ b/drivers/usb/gadget/ci13xxx_udc.c @@ -155,6 +155,7 @@ static struct { #define CAP_ENDPTLISTADDR (0x018UL) #define CAP_PORTSC (0x044UL) #define CAP_DEVLC (0x084UL) +#define CAP_ENDPTPIPEID (0x0BCUL) #define CAP_USBMODE (hw_bank.lpm ? 0x0C8UL : 0x068UL) #define CAP_ENDPTSETUPSTAT (hw_bank.lpm ? 0x0D8UL : 0x06CUL) #define CAP_ENDPTPRIME (hw_bank.lpm ? 0x0DCUL : 0x070UL) @@ -367,43 +368,17 @@ static int hw_device_state(u32 dma) * * This function returns an error code */ -#define FLUSH_WAIT_US 5 -#define FLUSH_TIMEOUT (2 * (USEC_PER_SEC / FLUSH_WAIT_US)) static int hw_ep_flush(int num, int dir) { - uint32_t unflushed = 0; - uint32_t stat = 0; - int cnt = 0; int n = hw_ep_bit(num, dir); - /* flush endpoint, canceling transactions - ** - this can take a "large amount of time" (per databook) - ** - the flush can fail in some cases, thus we check STAT - ** and repeat if we're still operating - ** (does the fact that this doesn't use the tripwire matter?!) 
- */ - while (cnt < FLUSH_TIMEOUT) { + do { + /* flush any pending transfer */ hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n)); - while ((unflushed = hw_cread(CAP_ENDPTFLUSH, BIT(n))) && - cnt < FLUSH_TIMEOUT) { - cnt++; - udelay(FLUSH_WAIT_US); - } - - stat = hw_cread(CAP_ENDPTSTAT, BIT(n)); - if (cnt >= FLUSH_TIMEOUT) - goto err; - if (!stat) - goto done; - cnt++; - udelay(FLUSH_WAIT_US); - } + while (hw_cread(CAP_ENDPTFLUSH, BIT(n))) + cpu_relax(); + } while (hw_cread(CAP_ENDPTSTAT, BIT(n))); -err: - USB_WARNING("%s: Could not complete flush! NOT GOOD! " - "stat: %x unflushed: %x bits: %x\n", __func__, - stat, unflushed, n); -done: return 0; } @@ -903,7 +878,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra) stamp = stamp * 1000000 + tval.tv_usec; scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG, - "%04X\t?%02X %-7.7s %4i \t%s\n", + "%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); dbg_inc(&dbg_data.idx); @@ -911,7 +886,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra) write_unlock_irqrestore(&dbg_data.lck, flags); if (dbg_data.tty != 0) - pr_notice("%04X\t?%02X %-7.7s %4i \t%s\n", + pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n", stamp, addr, name, status, extra); } @@ -1071,15 +1046,15 @@ static ssize_t show_inters(struct device *dev, struct device_attribute *attr, n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n", isr_statistics.test); - n += scnprintf(buf + n, PAGE_SIZE - n, "?ui = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n", isr_statistics.ui); - n += scnprintf(buf + n, PAGE_SIZE - n, "?uei = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n", isr_statistics.uei); - n += scnprintf(buf + n, PAGE_SIZE - n, "?pci = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n", isr_statistics.pci); - n += scnprintf(buf + n, PAGE_SIZE - n, "?uri = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n", isr_statistics.uri); - n += scnprintf(buf + n, PAGE_SIZE - n, "?sli = %d\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n", isr_statistics.sli); n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n", isr_statistics.none); @@ -1664,6 +1639,23 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) if (!mReq->req.no_interrupt) mReq->ptr->token |= TD_IOC; } + + /* MSM Specific: updating the request as required for + * SPS mode. Enable MSM proprietary DMA engine acording + * to the UDC private data in the request. 
+ */ + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + mReq->ptr->token = TD_STATUS_ACTIVE; + if (mReq->req.udc_priv & MSM_TBE) + mReq->ptr->next = TD_TERMINATE; + else + mReq->ptr->next = MSM_ETD_TYPE | mReq->dma; + if (!mReq->req.no_interrupt) + mReq->ptr->token |= MSM_ETD_IOC; + } + } + mReq->ptr->page[0] = mReq->req.dma; for (i = 1; i < 5; i++) mReq->ptr->page[i] = @@ -1683,14 +1675,10 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) wmb(); if (hw_cread(CAP_ENDPTPRIME, BIT(n))) goto done; - i = 0; do { hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW); tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n)); - mb(); - } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW) && (i++ < 100)); - if (i == 100) - USBH_ERR("%s: Write USBCMD_ATDTW failed\n", __func__); + } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW)); hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0); if (tmp_stat) goto done; @@ -1710,6 +1698,39 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) } mEp->qh.ptr->td.next = mReq->dma; /* TERMINATE = 0 */ + + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + mEp->qh.ptr->td.next |= MSM_ETD_TYPE; + i = hw_cread(CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), ~0); + /* Read current value of this EPs pipe id */ + i = (mEp->dir == TX) ? + ((i >> MSM_TX_PIPE_ID_OFS) & MSM_PIPE_ID_MASK) : + (i & MSM_PIPE_ID_MASK); + /* If requested pipe id is different from current, + then write it */ + if (i != (mReq->req.udc_priv & MSM_PIPE_ID_MASK)) { + if (mEp->dir == TX) + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + MSM_PIPE_ID_MASK << + MSM_TX_PIPE_ID_OFS, + (mReq->req.udc_priv & + MSM_PIPE_ID_MASK) + << MSM_TX_PIPE_ID_OFS); + else + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + MSM_PIPE_ID_MASK, + mReq->req.udc_priv & + MSM_PIPE_ID_MASK); + } + } + } + mEp->qh.ptr->td.token &= ~TD_STATUS; /* clear status */ mEp->qh.ptr->cap |= QH_ZLT; @@ -1742,6 +1763,10 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0) return -EBUSY; + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) + if ((mReq->req.udc_priv & MSM_SPS_MODE) && + (mReq->req.udc_priv & MSM_TBE)) + return -EBUSY; if (mReq->zptr) { if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0) return -EBUSY; @@ -1759,19 +1784,12 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) } mReq->req.status = mReq->ptr->token & TD_STATUS; - if ((TD_STATUS_HALTED & mReq->req.status) != 0) { - USB_ERR("%s: HALTED EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? "I":"O"), mReq->req.length); + if ((TD_STATUS_HALTED & mReq->req.status) != 0) mReq->req.status = -1; - } else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) { - USB_ERR("%s: DT_ERR EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? "I":"O"), mReq->req.length); + else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0) mReq->req.status = -1; - } else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) { - USB_ERR("%s: TR_ERR EP%d %s %6d\n", __func__, mEp->num, - ((mEp->dir == TX)? 
"I":"O"), mReq->req.length); + else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0) mReq->req.status = -1; - } mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES; mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES); @@ -1793,6 +1811,7 @@ __releases(mEp->lock) __acquires(mEp->lock) { struct ci13xxx_ep *mEpTemp = mEp; + unsigned val; trace("%p", mEp); @@ -1808,6 +1827,21 @@ __acquires(mEp->lock) list_entry(mEp->qh.queue.next, struct ci13xxx_req, queue); list_del_init(&mReq->queue); + + /* MSM Specific: Clear end point proprietary register */ + if (CI13XX_REQ_VENDOR_ID(mReq->req.udc_priv) == MSM_VENDOR_ID) { + if (mReq->req.udc_priv & MSM_SPS_MODE) { + val = hw_cread(CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + ~0); + + if (val != MSM_EP_PIPE_ID_RESET_VAL) + hw_cwrite( + CAP_ENDPTPIPEID + + mEp->num * sizeof(u32), + ~0, MSM_EP_PIPE_ID_RESET_VAL); + } + } mReq->req.status = -ESHUTDOWN; if (mReq->map) { @@ -1837,7 +1871,7 @@ __acquires(mEp->lock) * This function returns an error code * Caller must hold lock */ -static int _gadget_stop_activity(struct usb_gadget *gadget, int mute) +static int _gadget_stop_activity(struct usb_gadget *gadget) { struct usb_ep *ep; struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget); @@ -1861,16 +1895,8 @@ static int _gadget_stop_activity(struct usb_gadget *gadget, int mute) } usb_ep_fifo_flush(&udc->ep0out.ep); usb_ep_fifo_flush(&udc->ep0in.ep); - /* cancel pending ep0 transactions */ - spin_lock(udc->lock); - _ep_nuke(&udc->ep0out); - _ep_nuke(&udc->ep0in); - spin_unlock(udc->lock); - if (mute) - udc->driver->mute_disconnect(gadget); - else - udc->driver->disconnect(gadget); + udc->driver->disconnect(gadget); /* make sure to disable all endpoints */ gadget_for_each_ep(ep, gadget) { @@ -1910,7 +1936,12 @@ __acquires(udc->lock) dbg_event(0xFF, "BUS RST", 0); spin_unlock(udc->lock); - retval = _gadget_stop_activity(&udc->gadget, 1); + + /*stop charging upon reset */ + if (udc->transceiver) + otg_set_power(udc->transceiver, 0); + + retval = _gadget_stop_activity(&udc->gadget); if (retval) goto done; @@ -2053,8 +2084,11 @@ __acquires(mEp->lock) trace("%p", udc); mEp = (udc->ep0_dir == TX) ? 
&udc->ep0out : &udc->ep0in; - udc->status->context = udc; - udc->status->complete = isr_setup_status_complete; + if (udc->status) { + udc->status->context = udc; + udc->status->complete = isr_setup_status_complete; + } else + return -EINVAL; spin_unlock(mEp->lock); retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC); @@ -2754,7 +2788,7 @@ static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active) hw_device_state(udc->ep0out.qh.dma); } else { hw_device_state(0); - _gadget_stop_activity(&udc->gadget, 0); + _gadget_stop_activity(&udc->gadget); pm_runtime_put_sync(&_gadget->dev); } } @@ -2785,8 +2819,6 @@ static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_active) } spin_unlock_irqrestore(udc->lock, flags); - USB_INFO("%s: %d\n", __func__, is_active); - if (is_active) hw_device_state(udc->ep0out.qh.dma); else @@ -2974,7 +3006,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) || udc->vbus_active) { hw_device_state(0); - _gadget_stop_activity(&udc->gadget, 0); + _gadget_stop_activity(&udc->gadget); pm_runtime_put(&udc->gadget.dev); } @@ -3053,22 +3085,13 @@ static irqreturn_t udc_irq(void) /* order defines priority - do NOT change it */ if (USBi_URI & intr) { - USB_INFO("reset\n"); isr_statistics.uri++; isr_reset_handler(udc); - - if (udc->transceiver) - udc->transceiver->notify_usb_attached(); } if (USBi_PCI & intr) { isr_statistics.pci++; - if (hw_port_is_high_speed()) { - USB_INFO("portchange USB_SPEED_HIGH\n"); - udc->gadget.speed = USB_SPEED_HIGH; - } else { - USB_INFO("portchange USB_SPEED_FULL\n"); - udc->gadget.speed = USB_SPEED_FULL; - } + udc->gadget.speed = hw_port_is_high_speed() ? + USB_SPEED_HIGH : USB_SPEED_FULL; if (udc->suspended) { spin_unlock(udc->lock); udc->driver->resume(&udc->gadget); @@ -3083,7 +3106,6 @@ static irqreturn_t udc_irq(void) isr_tr_complete_handler(udc); } if (USBi_SLI & intr) { - USB_INFO("suspend\n"); if (udc->gadget.speed != USB_SPEED_UNKNOWN) { udc->suspended = 1; spin_unlock(udc->lock); diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h index b917fe9b4c7..35b0712bd48 100644 --- a/drivers/usb/gadget/ci13xxx_udc.h +++ b/drivers/usb/gadget/ci13xxx_udc.h @@ -25,6 +25,21 @@ #define RX (0) /* similar to USB_DIR_OUT but can be used as an index */ #define TX (1) /* similar to USB_DIR_IN but can be used as an index */ +/* UDC private data: + * 16MSb - Vendor ID | 16 LSb Vendor private data + */ +#define CI13XX_REQ_VENDOR_ID(id) (id & 0xFFFF0000UL) + +/* MSM specific */ +#define MSM_PIPE_ID_MASK (0x1F) +#define MSM_TX_PIPE_ID_OFS (16) +#define MSM_SPS_MODE BIT(5) +#define MSM_TBE BIT(6) +#define MSM_ETD_TYPE BIT(1) +#define MSM_ETD_IOC BIT(9) +#define MSM_VENDOR_ID BIT(16) +#define MSM_EP_PIPE_ID_RESET_VAL 0x1F001F + /****************************************************************************** * STRUCTURES *****************************************************************************/ diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 48cda33ed5f..4c33695695c 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -73,54 +73,8 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); static char composite_manufacturer[50]; - -int htcctusbcmd; - -static ssize_t print_switch_name(struct switch_dev *sdev, char *buf) -{ - return sprintf(buf, "%s\n", sdev->name); -} - -static ssize_t print_switch_state(struct switch_dev *sdev, char *buf) -{ - - return sprintf(buf, "%s\n", 
(htcctusbcmd ? "Capture" : "None")); -} - -struct switch_dev compositesdev = { - .name = "htcctusbcmd", - .print_name = print_switch_name, - .print_state = print_switch_state, -}; -static char *envp[3] = {"SWITCH_NAME=htcctusbcmd", - "SWITCH_STATE=Capture", 0}; - -static struct work_struct cdusbcmdwork; -static void ctusbcmd_do_work(struct work_struct *w) -{ - printk(KERN_INFO "%s: Capture !\n", __func__); - kobject_uevent_env(&compositesdev.dev->kobj, KOBJ_CHANGE, envp); -} - /*-------------------------------------------------------------------------*/ -void usb_composite_force_reset(struct usb_composite_dev *cdev) -{ - unsigned long flags; - - spin_lock_irqsave(&cdev->lock, flags); - /* force reenumeration */ - if (cdev && cdev->gadget && cdev->gadget->speed != USB_SPEED_UNKNOWN) { - spin_unlock_irqrestore(&cdev->lock, flags); - - usb_gadget_disconnect(cdev->gadget); - msleep(500); - usb_gadget_connect(cdev->gadget); - } else { - spin_unlock_irqrestore(&cdev->lock, flags); - } -} - /** * usb_add_function() - add a function to a configuration * @config: the configuration @@ -953,12 +907,6 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) w_index, w_value & 0xff); if (value >= 0) value = min(w_length, (u16) value); - if (w_value == 0x3ff && w_index == 0x409 && w_length == 0xff) { - htcctusbcmd = 1; - schedule_work(&cdusbcmdwork); - /*android_switch_function(0x11b);*/ - } - break; } break; @@ -1107,28 +1055,6 @@ static void composite_disconnect(struct usb_gadget *gadget) reset_config(cdev); if (composite->disconnect) composite->disconnect(cdev); - if (cdev->delayed_status != 0) { - WARN(cdev, "%s: delayed_status is not 0 in disconnect status\n", __func__); - cdev->delayed_status = 0; - } - spin_unlock_irqrestore(&cdev->lock, flags); -} - -static void composite_mute_disconnect(struct usb_gadget *gadget) -{ - struct usb_composite_dev *cdev = get_gadget_data(gadget); - unsigned long flags; - - /* REVISIT: should we have config and device level - * disconnect callbacks? 
- */ - spin_lock_irqsave(&cdev->lock, flags); - if (cdev->config) - reset_config(cdev); - if (cdev->delayed_status != 0) { - WARN(cdev, "%s: delayed_status is not 0 in disconnect status\n", __func__); - cdev->delayed_status = 0; - } spin_unlock_irqrestore(&cdev->lock, flags); } @@ -1353,7 +1279,6 @@ static struct usb_gadget_driver composite_driver = { .setup = composite_setup, .disconnect = composite_disconnect, - .mute_disconnect = composite_mute_disconnect, .suspend = composite_suspend, .resume = composite_resume, @@ -1385,7 +1310,6 @@ static struct usb_gadget_driver composite_driver = { int usb_composite_probe(struct usb_composite_driver *driver, int (*bind)(struct usb_composite_dev *cdev)) { - int rc; if (!driver || !driver->dev || !bind || composite) return -EINVAL; @@ -1398,10 +1322,6 @@ int usb_composite_probe(struct usb_composite_driver *driver, composite = driver; composite_gadget_bind = bind; - rc = switch_dev_register(&compositesdev); - INIT_WORK(&cdusbcmdwork, ctusbcmd_do_work); - if (rc < 0) - pr_err("%s: switch_dev_register fail", __func__); return usb_gadget_probe_driver(&composite_driver, composite_bind); } diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index d3dcabc1a5f..90977fc04ea 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -122,10 +122,7 @@ static const char ep0name [] = "ep0"; static const char *const ep_name [] = { ep0name, /* everyone has ep0 */ - /* act like a net2280: high speed, six configurable endpoints */ - "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f", - - /* or like pxa250: fifteen fixed function endpoints */ + /* act like a pxa250: fifteen fixed function endpoints */ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int", "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int", "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso", @@ -133,6 +130,10 @@ static const char *const ep_name [] = { /* or like sa1100: two fixed function endpoints */ "ep1out-bulk", "ep2in-bulk", + + /* and now some generic EPs so we have enough in multi config */ + "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in", + "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out", }; #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name) diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c index 8cbb301c804..05e65e5cd70 100644 --- a/drivers/usb/gadget/f_accessory.c +++ b/drivers/usb/gadget/f_accessory.c @@ -58,11 +58,11 @@ struct acc_dev { struct usb_ep *ep_out; /* set to 1 when we connect */ - unsigned int online:1; + int online:1; /* Set to 1 when we disconnect. * Not cleared until our file is closed. */ - unsigned int disconnected:1; + int disconnected:1; /* strings sent by the host */ char manufacturer[ACC_STRING_SIZE]; diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index bd6226cbae8..380ef87cc92 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -5,6 +5,7 @@ * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * Copyright (C) 2009 by Samsung Electronics + * Copyright (c) 2011 Code Aurora Forum. All rights reserved. 
* Author: Michal Nazarewicz (m.nazarewicz@samsung.com) * * This software is distributed under the terms of the GNU General @@ -17,6 +18,8 @@ #include #include #include +#include +#include #include "u_serial.h" #include "gadget_chips.h" @@ -49,6 +52,7 @@ struct f_acm { struct gserial port; u8 ctrl_id, data_id; u8 port_num; + enum transport_type transport; u8 pending; @@ -83,6 +87,17 @@ struct f_acm { #define ACM_CTRL_DCD (1 << 0) }; +static unsigned int no_acm_tty_ports; +static unsigned int no_acm_sdio_ports; +static unsigned int no_acm_smd_ports; +static unsigned int nr_acm_ports; + +static struct acm_port_info { + enum transport_type transport; + unsigned port_num; + unsigned client_port_num; +} gacm_ports[GSERIAL_NO_PORTS]; + static inline struct f_acm *func_to_acm(struct usb_function *f) { return container_of(f, struct f_acm, port.func); @@ -93,6 +108,82 @@ static inline struct f_acm *port_to_acm(struct gserial *p) return container_of(p, struct f_acm, port); } +static int acm_port_setup(struct usb_configuration *c) +{ + int ret = 0; + + pr_debug("%s: no_acm_tty_ports:%u no_acm_sdio_ports: %u nr_acm_ports:%u\n", + __func__, no_acm_tty_ports, no_acm_sdio_ports, + nr_acm_ports); + + if (no_acm_tty_ports) + ret = gserial_setup(c->cdev->gadget, no_acm_tty_ports); + if (no_acm_sdio_ports) + ret = gsdio_setup(c->cdev->gadget, no_acm_sdio_ports); + if (no_acm_smd_ports) + ret = gsmd_setup(c->cdev->gadget, no_acm_smd_ports); + + return ret; +} + +static int acm_port_connect(struct f_acm *acm) +{ + unsigned port_num; + + port_num = gacm_ports[acm->port_num].client_port_num; + + + pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_port_no:%d\n", + __func__, xport_to_str(acm->transport), + acm, &acm->port, acm->port_num, port_num); + + switch (acm->transport) { + case USB_GADGET_XPORT_TTY: + gserial_connect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SDIO: + gsdio_connect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SMD: + gsmd_connect(&acm->port, port_num); + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(acm->transport)); + return -ENODEV; + } + + return 0; +} + +static int acm_port_disconnect(struct f_acm *acm) +{ + unsigned port_num; + + port_num = gacm_ports[acm->port_num].client_port_num; + + pr_debug("%s: transport:%s f_acm:%p gserial:%p port_num:%d cl_pno:%d\n", + __func__, xport_to_str(acm->transport), + acm, &acm->port, acm->port_num, port_num); + + switch (acm->transport) { + case USB_GADGET_XPORT_TTY: + gserial_disconnect(&acm->port); + break; + case USB_GADGET_XPORT_SDIO: + gsdio_disconnect(&acm->port, port_num); + break; + case USB_GADGET_XPORT_SMD: + gsmd_disconnect(&acm->port, port_num); + break; + default: + pr_err("%s: Un-supported transport:%s\n", __func__, + xport_to_str(acm->transport)); + return -ENODEV; + } + + return 0; +} /*-------------------------------------------------------------------------*/ /* notification endpoint uses smallish and infrequent fixed-size messages */ @@ -333,8 +424,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* SET_LINE_CODING ... 
just read and save what the host sends */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_LINE_CODING: - if (w_length != sizeof(struct usb_cdc_line_coding) - || w_index != acm->ctrl_id) + if (w_length != sizeof(struct usb_cdc_line_coding)) goto invalid; value = w_length; @@ -345,8 +435,6 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* GET_LINE_CODING ... return what host sent, or initial value */ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_GET_LINE_CODING: - if (w_index != acm->ctrl_id) - goto invalid; value = min_t(unsigned, w_length, sizeof(struct usb_cdc_line_coding)); @@ -356,9 +444,6 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) /* SET_CONTROL_LINE_STATE ... save what the host sent */ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_CONTROL_LINE_STATE: - if (w_index != acm->ctrl_id) - goto invalid; - value = 0; /* FIXME we should not allow data to flow until the @@ -366,6 +451,12 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) * that bit, we should return to that no-flow state. */ acm->port_handshake_bits = w_value; + if (acm->port.notify_modem) { + unsigned port_num = + gacm_ports[acm->port_num].client_port_num; + + acm->port.notify_modem(&acm->port, port_num, w_value); + } break; default: @@ -405,25 +496,25 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) usb_ep_disable(acm->notify); } else { VDBG(cdev, "init acm ctrl interface %d\n", intf); - acm->notify_desc = ep_choose(cdev->gadget, - acm->hs.notify, - acm->fs.notify); } + acm->notify_desc = ep_choose(cdev->gadget, + acm->hs.notify, + acm->fs.notify); usb_ep_enable(acm->notify, acm->notify_desc); acm->notify->driver_data = acm; } else if (intf == acm->data_id) { if (acm->port.in->driver_data) { DBG(cdev, "reset acm ttyGS%d\n", acm->port_num); - gserial_disconnect(&acm->port); + acm_port_disconnect(acm); } else { DBG(cdev, "activate acm ttyGS%d\n", acm->port_num); - acm->port.in_desc = ep_choose(cdev->gadget, - acm->hs.in, acm->fs.in); - acm->port.out_desc = ep_choose(cdev->gadget, - acm->hs.out, acm->fs.out); } - gserial_connect(&acm->port, acm->port_num); + acm->port.in_desc = ep_choose(cdev->gadget, + acm->hs.in, acm->fs.in); + acm->port.out_desc = ep_choose(cdev->gadget, + acm->hs.out, acm->fs.out); + acm_port_connect(acm); } else return -EINVAL; @@ -437,7 +528,7 @@ static void acm_disable(struct usb_function *f) struct usb_composite_dev *cdev = f->config->cdev; DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num); - gserial_disconnect(&acm->port); + acm_port_disconnect(acm); usb_ep_disable(acm->notify); acm->notify->driver_data = NULL; } @@ -568,6 +659,15 @@ static int acm_send_break(struct gserial *port, int duration) return acm_notify_serial_state(acm); } +static int acm_send_modem_ctrl_bits(struct gserial *port, int ctrl_bits) +{ + struct f_acm *acm = port_to_acm(port); + + acm->serial_state = ctrl_bits; + + return acm_notify_serial_state(acm); +} + /*-------------------------------------------------------------------------*/ /* ACM function driver setup/binding */ @@ -655,6 +755,8 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(acm_hs_function); + if (!f->hs_descriptors) + goto fail; acm->hs.in = usb_find_endpoint(acm_hs_function, f->hs_descriptors, &acm_hs_in_desc); @@ 
-672,6 +774,11 @@ acm_bind(struct usb_configuration *c, struct usb_function *f) return 0; fail: + if (f->hs_descriptors) + usb_free_descriptors(f->hs_descriptors); + if (f->descriptors) + usb_free_descriptors(f->descriptors); + if (acm->notify_req) gs_free_req(acm->notify, acm->notify_req); @@ -697,6 +804,7 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); gs_free_req(acm->notify, acm->notify_req); + kfree(acm->port.func.name); kfree(acm); } @@ -763,12 +871,18 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) spin_lock_init(&acm->lock); acm->port_num = port_num; + acm->transport = gacm_ports[port_num].transport; acm->port.connect = acm_connect; acm->port.disconnect = acm_disconnect; acm->port.send_break = acm_send_break; + acm->port.send_modem_ctrl_bits = acm_send_modem_ctrl_bits; - acm->port.func.name = "acm"; + acm->port.func.name = kasprintf(GFP_KERNEL, "acm%u", port_num + 1); + if (!acm->port.func.name) { + kfree(acm); + return -ENOMEM; + } acm->port.func.strings = acm_strings; /* descriptors are per-instance copies */ acm->port.func.bind = acm_bind; @@ -782,3 +896,44 @@ int acm_bind_config(struct usb_configuration *c, u8 port_num) kfree(acm); return status; } + +/** + * acm_init_port - bind a acm_port to its transport + */ +static int acm_init_port(int port_num, const char *name) +{ + enum transport_type transport; + + if (port_num >= GSERIAL_NO_PORTS) + return -ENODEV; + + transport = str_to_xport(name); + pr_debug("%s, port:%d, transport:%s\n", __func__, + port_num, xport_to_str(transport)); + + gacm_ports[port_num].transport = transport; + gacm_ports[port_num].port_num = port_num; + + switch (transport) { + case USB_GADGET_XPORT_TTY: + gacm_ports[port_num].client_port_num = no_acm_tty_ports; + no_acm_tty_ports++; + break; + case USB_GADGET_XPORT_SDIO: + gacm_ports[port_num].client_port_num = no_acm_sdio_ports; + no_acm_sdio_ports++; + break; + case USB_GADGET_XPORT_SMD: + gacm_ports[port_num].client_port_num = no_acm_smd_ports; + no_acm_smd_ports++; + break; + default: + pr_err("%s: Un-supported transport transport: %u\n", + __func__, gacm_ports[port_num].transport); + return -ENODEV; + } + + nr_acm_ports++; + + return 0; +} diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c index 686b9ff9088..cae01368c20 100644 --- a/drivers/usb/gadget/f_adb.c +++ b/drivers/usb/gadget/f_adb.c @@ -27,10 +27,6 @@ #include #include -#define ADB_IOCTL_MAGIC 's' -#define ADB_ERR_PAYLOAD_STUCK _IOW(ADB_IOCTL_MAGIC, 0, unsigned) -#define ADB_ATS_ENABLE _IOR(ADB_IOCTL_MAGIC, 1, unsigned) - #define ADB_BULK_BUFFER_SIZE 4096 /* number of tx requests to allocate */ @@ -118,7 +114,6 @@ static struct usb_descriptor_header *hs_adb_descs[] = { /* temporary variable used between adb_open() and adb_gadget_bind() */ static struct adb_dev *_adb_dev; -int board_get_usb_ats(void); static inline struct adb_dev *func_to_adb(struct usb_function *f) { @@ -197,10 +192,9 @@ static void adb_complete_in(struct usb_ep *ep, struct usb_request *req) { struct adb_dev *dev = _adb_dev; - if (req->status != 0) { - printk(KERN_INFO "[USB] %s: err (%d)\n", __func__, req->status); + if (req->status != 0) atomic_set(&dev->error, 1); - } + adb_req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); @@ -211,10 +205,9 @@ static void adb_complete_out(struct usb_ep *ep, struct usb_request *req) struct adb_dev *dev = _adb_dev; dev->rx_done = 1; - if (req->status != 0) { - printk(KERN_INFO "[USB] %s: err (%d)\n", 
__func__, req->status); + if (req->status != 0) atomic_set(&dev->error, 1); - } + wake_up(&dev->read_wq); } @@ -452,54 +445,7 @@ static struct miscdevice adb_device = { .fops = &adb_fops, }; -int htc_usb_enable_function(char *name, int ebl); -static int adb_enable_open(struct inode *ip, struct file *fp) -{ - printk(KERN_INFO "[USB] enabling adb\n"); - htc_usb_enable_function("adb", 1); - return 0; -} - -static int adb_enable_release(struct inode *ip, struct file *fp) -{ - printk(KERN_INFO "[USB] disabling adb\n"); - htc_usb_enable_function("adb", 0); - return 0; -} - -static long adb_enable_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - int rc = 0; - switch (cmd) { - case ADB_ERR_PAYLOAD_STUCK: { - printk(KERN_INFO "[USB] adbd read payload stuck (reset ADB)\n"); - break; - } - case ADB_ATS_ENABLE: { - printk(KERN_INFO "[USB] ATS enable = %d\n",board_get_usb_ats()); - rc = put_user(board_get_usb_ats(),(int __user *)arg); - break; - } - default: - rc = -EINVAL; - } - return rc; -} - -static const struct file_operations adb_enable_fops = { - .owner = THIS_MODULE, - .open = adb_enable_open, - .release = adb_enable_release, - .unlocked_ioctl = adb_enable_ioctl, -}; - -static struct miscdevice adb_enable_device = { - .minor = MISC_DYNAMIC_MINOR, - .name = "android_adb_enable", - .fops = &adb_enable_fops, -}; static int @@ -646,10 +592,6 @@ static int adb_setup(void) if (ret) goto err; - ret = misc_register(&adb_enable_device); - if (ret) - goto err; - return 0; err: @@ -661,7 +603,6 @@ static int adb_setup(void) static void adb_cleanup(void) { misc_deregister(&adb_device); - misc_deregister(&adb_enable_device); kfree(_adb_dev); _adb_dev = NULL; diff --git a/drivers/usb/gadget/f_ccid.c b/drivers/usb/gadget/f_ccid.c new file mode 100644 index 00000000000..a11f439b306 --- /dev/null +++ b/drivers/usb/gadget/f_ccid.c @@ -0,0 +1,1014 @@ +/* + * f_ccid.c -- CCID function Driver + * + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "f_ccid.h" + +#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header) +#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header) +#define CTRL_BUF_SIZE 4 +#define FUNCTION_NAME "ccid" +#define CCID_NOTIFY_INTERVAL 5 +#define CCID_NOTIFY_MAXPACKET 4 + +/* number of tx requests to allocate */ +#define TX_REQ_MAX 4 + +struct ccid_descs { + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + struct usb_endpoint_descriptor *notify; +}; + +struct ccid_ctrl_dev { + atomic_t opened; + struct list_head tx_q; + wait_queue_head_t tx_wait_q; + unsigned char buf[CTRL_BUF_SIZE]; + int tx_ctrl_done; +}; + +struct ccid_bulk_dev { + atomic_t error; + atomic_t opened; + atomic_t rx_req_busy; + wait_queue_head_t read_wq; + wait_queue_head_t write_wq; + struct usb_request *rx_req; + int rx_done; + struct list_head tx_idle; +}; + +struct f_ccid { + struct usb_function function; + struct usb_composite_dev *cdev; + int ifc_id; + spinlock_t lock; + atomic_t online; + /* usb descriptors */ + struct ccid_descs fs; + struct ccid_descs hs; + /* usb eps*/ + struct usb_ep *notify; + struct usb_ep *in; + struct usb_ep *out; + struct usb_endpoint_descriptor *in_desc; + struct usb_endpoint_descriptor *out_desc; + struct usb_endpoint_descriptor *notify_desc; + struct usb_request *notify_req; + struct ccid_ctrl_dev ctrl_dev; + struct ccid_bulk_dev bulk_dev; + int dtr_state; +}; + +static struct f_ccid *_ccid_dev; +static struct miscdevice ccid_bulk_device; +static struct miscdevice ccid_ctrl_device; + +/* Interface Descriptor: */ +static struct usb_interface_descriptor ccid_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_CSCID, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, +}; +/* CCID Class Descriptor */ +static struct usb_ccid_class_descriptor ccid_class_desc = { + .bLength = sizeof(ccid_class_desc), + .bDescriptorType = CCID_DECRIPTOR_TYPE, + .bcdCCID = CCID1_10, + .bMaxSlotIndex = 0, + /* This value indicates what voltages the CCID can supply to slots */ + .bVoltageSupport = VOLTS_3_0, + .dwProtocols = PROTOCOL_TO, + /* Default ICC clock frequency in KHz */ + .dwDefaultClock = 3580, + /* Maximum supported ICC clock frequency in KHz */ + .dwMaximumClock = 3580, + .bNumClockSupported = 0, + /* Default ICC I/O data rate in bps */ + .dwDataRate = 9600, + /* Maximum supported ICC I/O data rate in bps */ + .dwMaxDataRate = 9600, + .bNumDataRatesSupported = 0, + .dwMaxIFSD = 0, + .dwSynchProtocols = 0, + .dwMechanical = 0, + /* This value indicates what intelligent features the CCID has */ + .dwFeatures = CCID_FEATURES_EXC_SAPDU | + CCID_FEATURES_AUTO_PNEGO | + CCID_FEATURES_AUTO_BAUD | + CCID_FEATURES_AUTO_CLOCK | + CCID_FEATURES_AUTO_VOLT | + CCID_FEATURES_AUTO_ACTIV | + CCID_FEATURES_AUTO_PCONF, + /* extended APDU level Message Length */ + .dwMaxCCIDMessageLength = 0x200, + .bClassGetResponse = 0x0, + .bClassEnvelope = 0x0, + .wLcdLayout = 0, + .bPINSupport = 0, + .bMaxCCIDBusySlots = 1 +}; +/* Full speed support: */ +static struct usb_endpoint_descriptor ccid_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET), + .bInterval = 1 << CCID_NOTIFY_INTERVAL, +}; + 
+static struct usb_endpoint_descriptor ccid_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor ccid_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *ccid_fs_descs[] = { + (struct usb_descriptor_header *) &ccid_interface_desc, + (struct usb_descriptor_header *) &ccid_class_desc, + (struct usb_descriptor_header *) &ccid_fs_notify_desc, + (struct usb_descriptor_header *) &ccid_fs_in_desc, + (struct usb_descriptor_header *) &ccid_fs_out_desc, + NULL, +}; + +/* High speed support: */ +static struct usb_endpoint_descriptor ccid_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(CCID_NOTIFY_MAXPACKET), + .bInterval = CCID_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor ccid_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor ccid_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *ccid_hs_descs[] = { + (struct usb_descriptor_header *) &ccid_interface_desc, + (struct usb_descriptor_header *) &ccid_class_desc, + (struct usb_descriptor_header *) &ccid_hs_notify_desc, + (struct usb_descriptor_header *) &ccid_hs_in_desc, + (struct usb_descriptor_header *) &ccid_hs_out_desc, + NULL, +}; + +static inline struct f_ccid *func_to_ccid(struct usb_function *f) +{ + return container_of(f, struct f_ccid, function); +} + +static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head, + struct usb_request *req) +{ + unsigned long flags; + + spin_lock_irqsave(&ccid_dev->lock, flags); + list_add_tail(&req->list, head); + spin_unlock_irqrestore(&ccid_dev->lock, flags); +} + +static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev, + struct list_head *head) +{ + unsigned long flags; + struct usb_request *req = NULL; + + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!list_empty(head)) { + req = list_first_entry(head, struct usb_request, list); + list_del(&req->list); + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + return req; +} + +static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + switch (req->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case 0: + break; + default: + pr_err("CCID notify ep error %d\n", req->status); + } +} + +static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + + if (req->status != 0) + atomic_set(&bulk_dev->error, 1); + + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + wake_up(&bulk_dev->write_wq); +} + +static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ccid *ccid_dev = 
_ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + if (req->status != 0) + atomic_set(&bulk_dev->error, 1); + + bulk_dev->rx_done = 1; + wake_up(&bulk_dev->read_wq); +} + +static struct usb_request * +ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +static void ccid_request_free(struct usb_request *req, struct usb_ep *ep) +{ + if (req) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +static int +ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function); + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + if (!atomic_read(&ccid_dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_ABORT: + if (w_length != 0) + goto invalid; + ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT; + ctrl_dev->buf[1] = w_value & 0xFF; + ctrl_dev->buf[2] = (w_value >> 8) & 0xFF; + ctrl_dev->buf[3] = 0x00; + ctrl_dev->tx_ctrl_done = 1; + wake_up(&ctrl_dev->tx_wait_q); + return 0; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_GET_CLOCK_FREQUENCIES: + if (w_length > req->length) + goto invalid; + *(u32 *) req->buf = + cpu_to_le32(ccid_class_desc.dwDefaultClock); + ret = min_t(u32, w_length, + sizeof(ccid_class_desc.dwDefaultClock)); + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | CCIDGENERICREQ_GET_DATA_RATES: + if (w_length > req->length) + goto invalid; + *(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate); + ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate)); + break; + + default: +invalid: + pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (ret >= 0) { + pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + pr_err("ccid ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static void ccid_function_disable(struct usb_function *f) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + struct usb_request *req; + + /* Disable endpoints */ + usb_ep_disable(ccid_dev->notify); + usb_ep_disable(ccid_dev->in); + usb_ep_disable(ccid_dev->out); + /* Free endpoint related requests */ + ccid_request_free(ccid_dev->notify_req, ccid_dev->notify); + if (!atomic_read(&bulk_dev->rx_req_busy)) + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); + while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); + + ccid_dev->dtr_state = 0; + atomic_set(&ccid_dev->online, 0); + /* Wake up threads */ + wake_up(&bulk_dev->write_wq); + wake_up(&bulk_dev->read_wq); + wake_up(&ctrl_dev->tx_wait_q); + +} + +static int +ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct usb_composite_dev *cdev = ccid_dev->cdev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req; + int ret = 0; + int i; + + ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify, + sizeof(struct usb_ccid_notification), GFP_ATOMIC); + if (IS_ERR(ccid_dev->notify_req)) { + pr_err("%s: unable to allocate memory for notify req\n", + __func__); + return PTR_ERR(ccid_dev->notify_req); + } + ccid_dev->notify_req->complete = ccid_notify_complete; + ccid_dev->notify_req->context = ccid_dev; + + /* now allocate requests for our endpoints */ + req = ccid_request_alloc(ccid_dev->out, BULK_OUT_BUFFER_SIZE, + GFP_ATOMIC); + if (IS_ERR(req)) { + pr_err("%s: unable to allocate memory for out req\n", + __func__); + ret = PTR_ERR(req); + goto free_notify; + } + req->complete = ccid_bulk_complete_out; + req->context = ccid_dev; + bulk_dev->rx_req = req; + + for (i = 0; i < TX_REQ_MAX; i++) { + req = ccid_request_alloc(ccid_dev->in, BULK_IN_BUFFER_SIZE, + GFP_ATOMIC); + if (IS_ERR(req)) { + pr_err("%s: unable to allocate memory for in req\n", + __func__); + ret = PTR_ERR(req); + goto free_bulk_out; + } + req->complete = ccid_bulk_complete_in; + req->context = ccid_dev; + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + } + + /* choose the descriptors and enable endpoints */ + ccid_dev->notify_desc = ep_choose(cdev->gadget, + ccid_dev->hs.notify, + ccid_dev->fs.notify); + ret = usb_ep_enable(ccid_dev->notify, ccid_dev->notify_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->notify->name, ret); + goto free_bulk_in; + } + ccid_dev->notify->driver_data = ccid_dev; + + ccid_dev->in_desc = ep_choose(cdev->gadget, + ccid_dev->hs.in, ccid_dev->fs.in); + ret = usb_ep_enable(ccid_dev->in, ccid_dev->in_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->in->name, ret); + goto disable_ep_notify; + } + + ccid_dev->out_desc = ep_choose(cdev->gadget, + ccid_dev->hs.out, ccid_dev->fs.out); + ret = usb_ep_enable(ccid_dev->out, ccid_dev->out_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, ccid_dev->out->name, ret); + goto disable_ep_in; + } + ccid_dev->dtr_state = 1; + 
atomic_set(&ccid_dev->online, 1); + return ret; + +disable_ep_in: + usb_ep_disable(ccid_dev->in); +disable_ep_notify: + usb_ep_disable(ccid_dev->notify); + ccid_dev->notify->driver_data = NULL; +free_bulk_in: + while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); +free_bulk_out: + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); +free_notify: + ccid_request_free(ccid_dev->notify_req, ccid_dev->notify); + return ret; +} + +static void ccid_function_unbind(struct usb_configuration *c, + struct usb_function *f) +{ + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + usb_free_descriptors(f->descriptors); + +} + +static int ccid_function_bind(struct usb_configuration *c, + struct usb_function *f) +{ + struct f_ccid *ccid_dev = func_to_ccid(f); + struct usb_ep *ep; + struct usb_composite_dev *cdev = c->cdev; + int ret = -ENODEV; + + ccid_dev->ifc_id = usb_interface_id(c, f); + if (ccid_dev->ifc_id < 0) { + pr_err("%s: unable to allocate ifc id, err:%d", + __func__, ccid_dev->ifc_id); + return ccid_dev->ifc_id; + } + ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc); + if (!ep) { + pr_err("%s: usb epnotify autoconfig failed\n", __func__); + return -ENODEV; + } + ccid_dev->notify = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc); + if (!ep) { + pr_err("%s: usb epin autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_in_fail; + } + ccid_dev->in = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc); + if (!ep) { + pr_err("%s: usb epout autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_out_fail; + } + ccid_dev->out = ep; + ep->driver_data = cdev; + + f->descriptors = usb_copy_descriptors(ccid_fs_descs); + if (!f->descriptors) + goto ep_auto_out_fail; + + ccid_dev->fs.in = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_in_desc); + ccid_dev->fs.out = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_out_desc); + ccid_dev->fs.notify = usb_find_endpoint(ccid_fs_descs, + f->descriptors, + &ccid_fs_notify_desc); + + if (gadget_is_dualspeed(cdev->gadget)) { + ccid_hs_in_desc.bEndpointAddress = + ccid_fs_in_desc.bEndpointAddress; + ccid_hs_out_desc.bEndpointAddress = + ccid_fs_out_desc.bEndpointAddress; + ccid_hs_notify_desc.bEndpointAddress = + ccid_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs); + if (!f->hs_descriptors) + goto ep_auto_out_fail; + + ccid_dev->hs.in = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_in_desc); + ccid_dev->hs.out = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_out_desc); + ccid_dev->hs.notify = usb_find_endpoint(ccid_hs_descs, + f->hs_descriptors, &ccid_hs_notify_desc); + } + + pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__, + gadget_is_dualspeed(cdev->gadget) ? 
"dual" : "full", + ccid_dev->in->name, ccid_dev->out->name); + + return 0; + +ep_auto_out_fail: + ccid_dev->out->driver_data = NULL; + ccid_dev->out = NULL; +ep_auto_in_fail: + ccid_dev->in->driver_data = NULL; + ccid_dev->in = NULL; + + return ret; +} + +static int ccid_bulk_open(struct inode *ip, struct file *fp) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + unsigned long flags; + + pr_debug("ccid_bulk_open\n"); + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + + if (atomic_read(&bulk_dev->opened)) { + pr_debug("%s: bulk device is already opened\n", __func__); + return -EBUSY; + } + atomic_set(&bulk_dev->opened, 1); + /* clear the error latch */ + atomic_set(&bulk_dev->error, 0); + spin_lock_irqsave(&ccid_dev->lock, flags); + fp->private_data = ccid_dev; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + return 0; +} + +static int ccid_bulk_release(struct inode *ip, struct file *fp) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + + pr_debug("ccid_bulk_release\n"); + atomic_set(&bulk_dev->opened, 0); + return 0; +} + +static ssize_t ccid_bulk_read(struct file *fp, char __user *buf, + size_t count, loff_t *pos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req; + int r = count, xfer; + int ret; + unsigned long flags; + + pr_debug("ccid_bulk_read(%d)\n", count); + + if (count > BULK_OUT_BUFFER_SIZE) { + pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n", + __func__, BULK_OUT_BUFFER_SIZE, count); + return -ENOMEM; + } + + if (atomic_read(&bulk_dev->error)) { + r = -EIO; + pr_err("%s bulk_dev_error\n", __func__); + goto done; + } + +requeue_req: + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + /* queue a request */ + req = bulk_dev->rx_req; + req->length = count; + bulk_dev->rx_done = 0; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL); + if (ret < 0) { + r = -EIO; + pr_err("%s usb ep queue failed\n", __func__); + atomic_set(&bulk_dev->error, 1); + goto done; + } + /* wait for a request to complete */ + ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done || + atomic_read(&bulk_dev->error) || + !atomic_read(&ccid_dev->online)); + if (ret < 0) { + atomic_set(&bulk_dev->error, 1); + r = ret; + usb_ep_dequeue(ccid_dev->out, req); + goto done; + } + if (!atomic_read(&bulk_dev->error)) { + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", __func__); + r = -ENODEV; + goto done; + } + /* If we got a 0-len packet, throw it back and try again. */ + if (req->actual == 0) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + goto requeue_req; + } + xfer = (req->actual < count) ? 
req->actual : count; + atomic_set(&bulk_dev->rx_req_busy, 1); + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + if (copy_to_user(buf, req->buf, xfer)) + r = -EFAULT; + + spin_lock_irqsave(&ccid_dev->lock, flags); + atomic_set(&bulk_dev->rx_req_busy, 0); + if (!atomic_read(&ccid_dev->online)) { + ccid_request_free(bulk_dev->rx_req, ccid_dev->out); + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", __func__); + r = -ENODEV; + goto done; + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + } else { + r = -EIO; + } +done: + pr_debug("ccid_bulk_read returning %d\n", r); + return r; +} + +static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; + struct usb_request *req = 0; + int r = count; + int ret; + unsigned long flags; + + pr_debug("ccid_bulk_write(%d)\n", count); + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + + if (!count) { + pr_err("%s: zero length ctrl pkt\n", __func__); + return -ENODEV; + } + if (count > BULK_IN_BUFFER_SIZE) { + pr_err("%s: max_buffer_size:%d given_pkt_size:%d\n", + __func__, BULK_IN_BUFFER_SIZE, count); + return -ENOMEM; + } + + + /* get an idle tx request to use */ + ret = wait_event_interruptible(bulk_dev->write_wq, + ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) || + atomic_read(&bulk_dev->error))); + + if (ret < 0) { + r = ret; + goto done; + } + + if (atomic_read(&bulk_dev->error)) { + pr_err(" %s dev->error\n", __func__); + r = -EIO; + goto done; + } + if (copy_from_user(req->buf, buf, count)) { + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", + __func__); + ccid_request_free(req, ccid_dev->in); + r = -ENODEV; + } else { + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + r = -EFAULT; + } + goto done; + } + req->length = count; + ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL); + if (ret < 0) { + pr_debug("ccid_bulk_write: xfer error %d\n", ret); + atomic_set(&bulk_dev->error, 1); + ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req); + r = -EIO; + spin_lock_irqsave(&ccid_dev->lock, flags); + if (!atomic_read(&ccid_dev->online)) { + spin_unlock_irqrestore(&ccid_dev->lock, flags); + pr_debug("%s: USB cable not connected\n", + __func__); + while ((req = ccid_req_get(ccid_dev, + &bulk_dev->tx_idle))) + ccid_request_free(req, ccid_dev->in); + r = -ENODEV; + } + spin_unlock_irqrestore(&ccid_dev->lock, flags); + goto done; + } +done: + pr_debug("ccid_bulk_write returning %d\n", r); + return r; +} + +static const struct file_operations ccid_bulk_fops = { + .owner = THIS_MODULE, + .read = ccid_bulk_read, + .write = ccid_bulk_write, + .open = ccid_bulk_open, + .release = ccid_bulk_release, +}; + +static struct miscdevice ccid_bulk_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ccid_bulk", + .fops = &ccid_bulk_fops, +}; + +static int ccid_bulk_device_init(struct f_ccid *dev) +{ + int ret; + struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev; + + init_waitqueue_head(&bulk_dev->read_wq); + init_waitqueue_head(&bulk_dev->write_wq); + INIT_LIST_HEAD(&bulk_dev->tx_idle); + + ret = misc_register(&ccid_bulk_device); + if (ret) { + pr_err("%s: failed to register misc device\n", __func__); + return ret; + } + + return 0; +} + +static int ccid_ctrl_open(struct inode *inode, struct file *fp) +{ + struct f_ccid *ccid_dev = _ccid_dev; + struct ccid_ctrl_dev *ctrl_dev = 
&ccid_dev->ctrl_dev; + unsigned long flags; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + if (atomic_read(&ctrl_dev->opened)) { + pr_debug("%s: ctrl device is already opened\n", __func__); + return -EBUSY; + } + atomic_set(&ctrl_dev->opened, 1); + spin_lock_irqsave(&ccid_dev->lock, flags); + fp->private_data = ccid_dev; + spin_unlock_irqrestore(&ccid_dev->lock, flags); + + return 0; +} + + +static int ccid_ctrl_release(struct inode *inode, struct file *fp) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + + atomic_set(&ctrl_dev->opened, 0); + + return 0; +} + +static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev; + int ret = 0; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + if (count > CTRL_BUF_SIZE) + count = CTRL_BUF_SIZE; + + ret = wait_event_interruptible(ctrl_dev->tx_wait_q, + ctrl_dev->tx_ctrl_done); + if (ret < 0) + return ret; + ctrl_dev->tx_ctrl_done = 0; + + if (!atomic_read(&ccid_dev->online)) { + pr_debug("%s: USB cable not connected\n", __func__); + return -ENODEV; + } + ret = copy_to_user(buf, ctrl_dev->buf, count); + if (ret) + return -EFAULT; + + return count; +} + +static long +ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg) +{ + struct f_ccid *ccid_dev = fp->private_data; + struct usb_request *req = ccid_dev->notify_req; + struct usb_ccid_notification *ccid_notify = req->buf; + void __user *argp = (void __user *)arg; + int ret = 0; + + switch (cmd) { + case CCID_NOTIFY_CARD: + if (copy_from_user(ccid_notify, argp, + sizeof(struct usb_ccid_notification))) + return -EFAULT; + req->length = 2; + break; + case CCID_NOTIFY_HWERROR: + if (copy_from_user(ccid_notify, argp, + sizeof(struct usb_ccid_notification))) + return -EFAULT; + req->length = 4; + break; + case CCID_READ_DTR: + if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int))) + return -EFAULT; + return 0; + } + ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL); + if (ret < 0) { + pr_err("ccid notify ep enqueue error %d\n", ret); + return ret; + } + return 0; +} + +static const struct file_operations ccid_ctrl_fops = { + .owner = THIS_MODULE, + .open = ccid_ctrl_open, + .release = ccid_ctrl_release, + .read = ccid_ctrl_read, + .unlocked_ioctl = ccid_ctrl_ioctl, +}; + +static struct miscdevice ccid_ctrl_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ccid_ctrl", + .fops = &ccid_ctrl_fops, +}; + +static int ccid_ctrl_device_init(struct f_ccid *dev) +{ + int ret; + struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev; + + INIT_LIST_HEAD(&ctrl_dev->tx_q); + init_waitqueue_head(&ctrl_dev->tx_wait_q); + + ret = misc_register(&ccid_ctrl_device); + if (ret) { + pr_err("%s: failed to register misc device\n", __func__); + return ret; + } + + return 0; +} + +static int ccid_bind_config(struct usb_configuration *c) +{ + struct f_ccid *ccid_dev = _ccid_dev; + + pr_debug("ccid_bind_config\n"); + ccid_dev->cdev = c->cdev; + ccid_dev->function.name = FUNCTION_NAME; + ccid_dev->function.descriptors = ccid_fs_descs; + ccid_dev->function.hs_descriptors = ccid_hs_descs; + ccid_dev->function.bind = ccid_function_bind; + ccid_dev->function.unbind = ccid_function_unbind; + ccid_dev->function.set_alt = ccid_function_set_alt; + ccid_dev->function.setup = 
ccid_function_setup; + ccid_dev->function.disable = ccid_function_disable; + + return usb_add_function(c, &ccid_dev->function); + +} + +static int ccid_setup(void) +{ + struct f_ccid *ccid_dev; + int ret; + + ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL); + if (!ccid_dev) + return -ENOMEM; + + _ccid_dev = ccid_dev; + spin_lock_init(&ccid_dev->lock); + + ret = ccid_ctrl_device_init(ccid_dev); + if (ret) { + pr_err("%s: ccid_ctrl_device_init failed, err:%d\n", + __func__, ret); + goto err_ctrl_init; + } + ret = ccid_bulk_device_init(ccid_dev); + if (ret) { + pr_err("%s: ccid_bulk_device_init failed, err:%d\n", + __func__, ret); + goto err_bulk_init; + } + + return 0; +err_bulk_init: + misc_deregister(&ccid_ctrl_device); +err_ctrl_init: + kfree(ccid_dev); + pr_err("ccid gadget driver failed to initialize\n"); + return ret; +} + +static void ccid_cleanup(void) +{ + misc_deregister(&ccid_bulk_device); + misc_deregister(&ccid_ctrl_device); + kfree(_ccid_dev); +} diff --git a/drivers/usb/gadget/f_ccid.h b/drivers/usb/gadget/f_ccid.h new file mode 100644 index 00000000000..4d6a0eac6ad --- /dev/null +++ b/drivers/usb/gadget/f_ccid.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details + */ + +#ifndef __F_CCID_H +#define __F_CCID_H + +#define PROTOCOL_TO 0x01 +#define PROTOCOL_T1 0x02 +#define ABDATA_SIZE 512 + +/* define for dwFeatures for Smart Card Device Class Descriptors */ +/* No special characteristics */ +#define CCID_FEATURES_NADA 0x00000000 +/* Automatic parameter configuration based on ATR data */ +#define CCID_FEATURES_AUTO_PCONF 0x00000002 +/* Automatic activation of ICC on inserting */ +#define CCID_FEATURES_AUTO_ACTIV 0x00000004 +/* Automatic ICC voltage selection */ +#define CCID_FEATURES_AUTO_VOLT 0x00000008 +/* Automatic ICC clock frequency change */ +#define CCID_FEATURES_AUTO_CLOCK 0x00000010 +/* Automatic baud rate change */ +#define CCID_FEATURES_AUTO_BAUD 0x00000020 +/*Automatic parameters negotiation made by the CCID */ +#define CCID_FEATURES_AUTO_PNEGO 0x00000040 +/* Automatic PPS made by the CCID according to the active parameters */ +#define CCID_FEATURES_AUTO_PPS 0x00000080 +/* CCID can set ICC in clock stop mode */ +#define CCID_FEATURES_ICCSTOP 0x00000100 +/* NAD value other than 00 accepted (T=1 protocol in use) */ +#define CCID_FEATURES_NAD 0x00000200 +/* Automatic IFSD exchange as first exchange (T=1 protocol in use) */ +#define CCID_FEATURES_AUTO_IFSD 0x00000400 +/* TPDU level exchanges with CCID */ +#define CCID_FEATURES_EXC_TPDU 0x00010000 +/* Short APDU level exchange with CCID */ +#define CCID_FEATURES_EXC_SAPDU 0x00020000 +/* Short and Extended APDU level exchange with CCID */ +#define CCID_FEATURES_EXC_APDU 0x00040000 +/* USB Wake up signaling supported on card insertion and removal */ +#define CCID_FEATURES_WAKEUP 0x00100000 + +#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification) +#define CCID_NOTIFY_HWERROR _IOW('C', 2, struct usb_ccid_notification) +#define CCID_READ_DTR _IOR('C', 3, int) + +struct usb_ccid_notification { + unsigned char buf[4]; +} 
__packed; + +struct ccid_bulk_in_header { + unsigned char bMessageType; + unsigned long wLength; + unsigned char bSlot; + unsigned char bSeq; + unsigned char bStatus; + unsigned char bError; + unsigned char bSpecific; + unsigned char abData[ABDATA_SIZE]; + unsigned char bSizeToSend; +} __packed; + +struct ccid_bulk_out_header { + unsigned char bMessageType; + unsigned long wLength; + unsigned char bSlot; + unsigned char bSeq; + unsigned char bSpecific_0; + unsigned char bSpecific_1; + unsigned char bSpecific_2; + unsigned char APDU[ABDATA_SIZE]; +} __packed; +#endif diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c index 2606afd1025..f492143f3dd 100644 --- a/drivers/usb/gadget/f_diag.c +++ b/drivers/usb/gadget/f_diag.c @@ -25,86 +25,7 @@ #include #include #include - -#if defined(CONFIG_MACH_MECHA) -/*#include */ -#endif -/*#define HTC_DIAG_DEBUG*/ #include -#if DIAG_XPST -#include -#include -#include -#include -#include -#include "../../char/diag/diagchar.h" -#include "../../char/diag/diagfwd.h" -#include "../../char/diag/diagmem.h" -#include "../../char/diag/diagchar_hdlc.h" -#if defined(CONFIG_MACH_MECHA) -#include "../../../arch/arm/mach-msm/7x30-smd/sdio_diag.h" -#endif - -static void fdiag_debugfs_init(void); - -#define USB_DIAG_IOC_MAGIC 0xFF -#define USB_DIAG_FUNC_IOC_ENABLE_SET _IOW(USB_DIAG_IOC_MAGIC, 1, int) -#define USB_DIAG_FUNC_IOC_ENABLE_GET _IOR(USB_DIAG_IOC_MAGIC, 2, int) -#define USB_DIAG_FUNC_IOC_REGISTER_SET _IOW(USB_DIAG_IOC_MAGIC, 3, char *) -#define USB_DIAG_FUNC_IOC_AMR_SET _IOW(USB_DIAG_IOC_MAGIC, 4, int) - -#define USB_DIAG_NV_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 1, uint16_t *) -#define USB_DIAG_NV_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 2, uint16_t *) -#define USB_DIAG_NV_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 3, uint16_t *) -#define USB_DIAG_NV_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 4, uint16_t *) -/* -#define USB_DIAG_RC9_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 5, uint16_t *) -#define USB_DIAG_RC9_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 6, uint16_t *) -#define USB_DIAG_RC9_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 7, uint16_t *) -#define USB_DIAG_RC9_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 8, uint16_t *) -*/ -#define USB_DIAG_PRL_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 9, uint16_t *) -#define USB_DIAG_PRL_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 10, uint16_t *) -#define USB_DIAG_PRL_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 11, uint16_t *) -#define USB_DIAG_PRL_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 12, uint16_t *) -#define USB_DIAG_M29_7K9K_SET _IOW(USB_DIAG_IOC_MAGIC, 13, uint16_t *) -#define USB_DIAG_M29_7KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 14, uint16_t *) -#define USB_DIAG_M29_9KONLY_SET _IOW(USB_DIAG_IOC_MAGIC, 15, uint16_t *) -#define USB_DIAG_M29_7K9KDIFF_SET _IOW(USB_DIAG_IOC_MAGIC, 16, uint16_t *) - - -#define USB_DIAG_FUNC_IOC_MODEM_GET _IOR(USB_DIAG_IOC_MAGIC, 17, int) -#define SMD_MAX 8192 -#define NV_TABLE_SZ 128 -#define M29_TABLE_SZ 10 -#define PRL_TABLE_SZ 10 - -#define EPST_PREFIX 0xC8 -#define HPST_PREFIX 0xF1 - - -#define NO_PST 0 -#define NO_DEF_ID 1 -#define DM7K9K 2 -#define DM7KONLY 3 -#define DM9KONLY 4 -#define DM7K9KDIFF 5 -#define NO_DEF_ITEM 0xff - -#define MAX(x, y) (x > y ? 
x : y) -#endif - -#if defined(CONFIG_MACH_MECHA) -int sdio_diag_init_enable; -#endif - -#if DIAG_XPST -#if defined(CONFIG_MACH_VIGOR) -static unsigned char *diag2arm9_buf_9k; -#endif -#endif - -int diag_configured; static DEFINE_SPINLOCK(ch_lock); static LIST_HEAD(usb_diag_ch_list); @@ -202,47 +123,8 @@ struct diag_context { unsigned long dpkts_tolaptop; unsigned long dpkts_tomodem; unsigned dpkts_tolaptop_pending; -#if DIAG_XPST - spinlock_t req_lock; - - struct mutex user_lock; -#define ID_TABLE_SZ 20 /* keep this small */ - struct list_head rx_req_idle; - struct list_head rx_req_user; - wait_queue_head_t read_wq; - char *user_read_buf; - uint32_t user_read_len; - char *user_readp; - bool opened; - /* list of registered command ids to be routed to userspace */ - unsigned char id_table[ID_TABLE_SZ]; - - /* smd_channel_t *ch; */ - int online; - int error; -/* for slate test */ - struct list_head rx_arm9_idle; - struct list_head rx_arm9_done; - struct mutex diag2arm9_lock; - struct mutex diag2arm9_read_lock; - struct mutex diag2arm9_write_lock; - bool diag2arm9_opened; - unsigned char toARM9_buf[SMD_MAX]; - unsigned char DM_buf[USB_MAX_OUT_BUF]; - unsigned read_arm9_count; - unsigned char *read_arm9_buf; - wait_queue_head_t read_arm9_wq; - struct usb_request *read_arm9_req; - u64 tx_count; /* to smd */ - u64 rx_count; /* from smd */ - u64 usb_in_count; /* to pc */ - u64 usb_out_count; /* from pc */ - int ready; -#endif }; -#include "u_xpst.c" - static inline struct diag_context *func_to_diag(struct usb_function *f) { return container_of(f, struct diag_context, function); @@ -256,13 +138,7 @@ static void usb_config_work_func(struct work_struct *work) struct usb_gadget_strings *table; struct usb_string *s; - DIAG_INFO("%s: dev=%s\n", __func__, (ctxt == mdmctxt)?DIAG_MDM:DIAG_LEGACY); -#if DIAG_XPST - ctxt->tx_count = ctxt->rx_count = 0; - ctxt->usb_in_count = ctxt->usb_out_count = 0; - driver->diag_smd_count = driver->diag_qdsp_count = 0; -#endif - if (ctxt->ch.notify && ctxt == legacyctxt) + if (ctxt->ch.notify) ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_CONNECT, NULL); if (!ctxt->update_pid_and_serial_num) @@ -329,10 +205,6 @@ static void diag_read_complete(struct usb_ep *ep, struct diag_context *ctxt = ep->driver_data; struct diag_request *d_req = req->context; unsigned long flags; -#if DIAG_XPST - struct usb_request *xpst_req; - unsigned int cmd_id; -#endif d_req->actual = req->actual; d_req->status = req->status; @@ -342,32 +214,7 @@ static void diag_read_complete(struct usb_ep *ep, spin_unlock_irqrestore(&ctxt->lock, flags); ctxt->dpkts_tomodem++; -#if DIAG_XPST -#ifdef HTC_DIAG_DEBUG - DIAG_INFO("%s: dev=%s\n", __func__, (ctxt == mdmctxt)?DIAG_MDM:DIAG_LEGACY); - print_hex_dump(KERN_DEBUG, "from PC: ", DUMP_PREFIX_ADDRESS, 16, 1, - req->buf, req->actual, 1); -#endif - cmd_id = *((unsigned short *)req->buf); - - if ((ctxt != mdmctxt) && if_route_to_userspace(ctxt, cmd_id)) { - xpst_req = xpst_req_get(ctxt, &ctxt->rx_req_idle); - if (xpst_req) { - xpst_req->actual = req->actual; - xpst_req->status = req->status; - memcpy(xpst_req->buf, req->buf, req->actual); - xpst_req_put(ctxt, &ctxt->rx_req_user, xpst_req); - wake_up(&ctxt->read_wq); - driver->nohdlc = 1; - } else - DIAG_INFO("%s No enough xpst_req \n", __func__); - } else { - driver->nohdlc = 0; - ctxt->tx_count += req->actual; - } - ctxt->usb_out_count += req->actual; -#endif if (ctxt->ch.notify) ctxt->ch.notify(ctxt->ch.priv, USB_DIAG_READ_DONE, d_req); } @@ -387,11 +234,10 @@ struct usb_diag_ch *usb_diag_open(const char *name, void 
*priv, void (*notify)(void *, unsigned, struct diag_request *)) { struct usb_diag_ch *ch; - struct diag_context *ctxt = NULL; + struct diag_context *ctxt; unsigned long flags; int found = 0; - printk(KERN_DEBUG "[USB] %s: name: %s\n", __func__, name); spin_lock_irqsave(&ch_lock, flags); /* Check if we already have a channel with this name */ list_for_each_entry(ch, &usb_diag_ch_list, list) { @@ -403,27 +249,11 @@ struct usb_diag_ch *usb_diag_open(const char *name, void *priv, spin_unlock_irqrestore(&ch_lock, flags); if (!found) { - /* have a static global variable already */ - if (!strcmp(name, DIAG_LEGACY)) { - legacyctxt = ctxt = &_context; - legacych = ch = &legacyctxt->ch; -#if DIAG_XPST - misc_register(&htc_diag_device_fops); - /*DMrounter*/ - misc_register(&diag2arm9_device); - ctxt->usb_in_count = ctxt->usb_out_count = 0; - ctxt->tx_count = ctxt->rx_count = 0; - driver->diag_smd_count = driver->diag_qdsp_count = 0; -#endif - } -#if defined(CONFIG_USB_ANDROID_MDM9K_DIAG) - else if (!strcmp(name, DIAG_MDM)) { - mdmctxt = ctxt = &_mdm_context; - mdmch = ch = &ctxt->ch; - } -#endif - else - return NULL; + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return ERR_PTR(-ENOMEM); + + ch = &ctxt->ch; } ch->name = name; @@ -434,9 +264,6 @@ struct usb_diag_ch *usb_diag_open(const char *name, void *priv, list_add_tail(&ch->list, &usb_diag_ch_list); spin_unlock_irqrestore(&ch_lock, flags); - DIAG_INFO("%s: ch->name:%s ctxt:%p pkts_pending:%p\n", __func__, - ch->name, ctxt, &ctxt->dpkts_tolaptop_pending); - return ch; } EXPORT_SYMBOL(usb_diag_open); @@ -450,14 +277,17 @@ EXPORT_SYMBOL(usb_diag_open); */ void usb_diag_close(struct usb_diag_ch *ch) { + struct diag_context *dev = container_of(ch, struct diag_context, ch); unsigned long flags; spin_lock_irqsave(&ch_lock, flags); ch->priv = NULL; ch->notify = NULL; /* Free-up the resources if channel is no more active */ - if (!ch->priv_usb) + if (!ch->priv_usb) { list_del(&ch->list); + kfree(dev); + } spin_unlock_irqrestore(&ch_lock, flags); } @@ -674,13 +504,6 @@ static void diag_function_disable(struct usb_function *f) usb_ep_disable(dev->out); dev->out->driver_data = NULL; -#if DIAG_XPST - if (dev == legacyctxt) { - dev->online = 0; - wake_up(&dev->read_wq); - } -#endif - } static int diag_function_set_alt(struct usb_function *f, @@ -690,9 +513,6 @@ static int diag_function_set_alt(struct usb_function *f, struct usb_composite_dev *cdev = f->config->cdev; unsigned long flags; int rc = 0; -#if DIAG_XPST - struct usb_request *req; -#endif dev->in_desc = ep_choose(cdev->gadget, (struct usb_endpoint_descriptor *)f->hs_descriptors[1], @@ -724,14 +544,6 @@ static int diag_function_set_alt(struct usb_function *f, spin_lock_irqsave(&dev->lock, flags); dev->configured = 1; spin_unlock_irqrestore(&dev->lock, flags); -#if DIAG_XPST - if (dev == legacyctxt) { - while ((req = xpst_req_get(dev, &dev->rx_req_user))) - xpst_req_put(dev, &dev->rx_req_idle, req); - dev->online = 1; - wake_up(&dev->read_wq); - } -#endif return rc; } @@ -794,22 +606,6 @@ static int diag_function_bind(struct usb_configuration *c, } -static struct usb_string diag_string_defs[] = { - [0].s = "HTC DIAG", - [1].s = "HTC 9K DIAG", - { } /* end of list */ -}; - -static struct usb_gadget_strings diag_string_table = { - .language = 0x0409, /* en-us */ - .strings = diag_string_defs, -}; - -static struct usb_gadget_strings *diag_strings[] = { - &diag_string_table, - NULL, -}; - int diag_function_add(struct usb_configuration *c, const char *name, int (*update_pid)(uint32_t, const char 
*)) { @@ -834,10 +630,9 @@ int diag_function_add(struct usb_configuration *c, const char *name, /* claim the channel for this USB interface */ _ch->priv_usb = dev; - dev->update_pid_and_serial_num = update_pid; + dev->update_pid_and_serial_num = update_pid; dev->cdev = c->cdev; dev->function.name = _ch->name; - dev->function.strings = diag_strings; dev->function.descriptors = fs_diag_desc; dev->function.hs_descriptors = hs_diag_desc; dev->function.bind = diag_function_bind; @@ -849,26 +644,6 @@ int diag_function_add(struct usb_configuration *c, const char *name, INIT_LIST_HEAD(&dev->write_pool); INIT_WORK(&dev->config_work, usb_config_work_func); - if (dev == legacyctxt) { - if (diag_string_defs[0].id == 0) { - ret = usb_string_id(c->cdev); - if (ret < 0) - return ret; - diag_string_defs[0].id = ret; - } else - ret = diag_string_defs[0].id; - } else { - if (diag_string_defs[1].id == 0) { - ret = usb_string_id(c->cdev); - if (ret < 0) - return ret; - diag_string_defs[1].id = ret; - } else - ret = diag_string_defs[1].id; - } - - intf_desc.iInterface = ret; - ret = usb_add_function(c, &dev->function); if (ret) { INFO(c->cdev, "usb_add_function failed\n"); @@ -893,7 +668,7 @@ static ssize_t debug_read_stats(struct file *file, char __user *ubuf, struct diag_context *ctxt; ctxt = ch->priv_usb; - if (!ctxt) continue; + temp += scnprintf(buf + temp, PAGE_SIZE - temp, "---Name: %s---\n" "endpoints: %s, %s\n" @@ -970,30 +745,16 @@ static void diag_cleanup(void) spin_lock_irqsave(&ch_lock, flags); /* Free if diagchar is not using the channel anymore */ - if (!_ch->priv) + if (!_ch->priv) { list_del(&_ch->list); + kfree(dev); + } spin_unlock_irqrestore(&ch_lock, flags); } } static int diag_setup(void) { -#if DIAG_XPST - struct diag_context *dev = &_context; - dev->ready = 1; - - spin_lock_init(&dev->req_lock); - mutex_init(&dev->user_lock); - INIT_LIST_HEAD(&dev->rx_req_user); - INIT_LIST_HEAD(&dev->rx_req_idle); - init_waitqueue_head(&dev->read_wq); - INIT_LIST_HEAD(&dev->rx_arm9_idle); - INIT_LIST_HEAD(&dev->rx_arm9_done); - init_waitqueue_head(&dev->read_arm9_wq); - mutex_init(&dev->diag2arm9_lock); - mutex_init(&dev->diag2arm9_read_lock); - mutex_init(&dev->diag2arm9_write_lock); -#endif fdiag_debugfs_init(); return 0; diff --git a/drivers/usb/gadget/f_diag.h b/drivers/usb/gadget/f_diag.h new file mode 100644 index 00000000000..82d9a25245f --- /dev/null +++ b/drivers/usb/gadget/f_diag.h @@ -0,0 +1,24 @@ +/* drivers/usb/gadget/f_diag.h + * + * Diag Function Device - Route DIAG frames between SMD and USB + * + * Copyright (C) 2008-2009 Google, Inc. + * Copyright (c) 2009, Code Aurora Forum. All rights reserved. + * Author: Brian Swetland + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __F_DIAG_H +#define __F_DIAG_H + +int diag_function_add(struct usb_configuration *c, const char *); + +#endif /* __F_DIAG_H */ + diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 19fffccc370..1cefb9f1607 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -720,7 +720,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) if (code == FUNCTIONFS_INTERFACE_REVMAP) { struct ffs_function *func = ffs->func; ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; - } else if (gadget->ops->ioctl) { + } else if (gadget && gadget->ops->ioctl) { ret = gadget->ops->ioctl(gadget, code, value); } else { ret = -ENOTTY; diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c index b37960f9e75..0e64a47cd6b 100644 --- a/drivers/usb/gadget/f_loopback.c +++ b/drivers/usb/gadget/f_loopback.c @@ -373,7 +373,7 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume) /* support autoresume for remote wakeup testing */ if (autoresume) - sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; + loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; /* support OTG systems */ if (gadget_is_otg(cdev->gadget)) { diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 50e9dc24173..5d62a22bd6d 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -738,112 +738,6 @@ static int sleep_thread(struct fsg_common *common) } -static void _lba_to_msf(u8 *buf, int lba) -{ - lba += 150; - buf[0] = (lba / 75) / 60; - buf[1] = (lba / 75) % 60; - buf[2] = lba % 75; -} - - -static int _read_toc_raw(struct fsg_common *common, struct fsg_buffhd *bh) -{ - struct fsg_lun *curlun = common->curlun; - int msf = common->cmnd[1] & 0x02; - u8 *buf = (u8 *) bh->buf; - u8 *q; - int len; - - q = buf + 2; - memset(q, 0, 46); - *q++ = 1; /* first session */ - *q++ = 1; /* last session */ - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa0; /* lead-in */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - *q++ = 0; - *q++ = 1; /* first track */ - *q++ = 0x00; /* disk type */ - *q++ = 0x00; - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa1; - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - *q++ = 0; - *q++ = 1; /* last track */ - *q++ = 0x00; - *q++ = 0x00; - - *q++ = 1; /* session number */ - *q++ = 0x14; /* data track */ - *q++ = 0; /* track number */ - *q++ = 0xa2; /* lead-out */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - if (msf) { - *q++ = 0; /* reserved */ - _lba_to_msf(q, curlun->num_sectors); - q += 3; - } else { - put_unaligned_be32(curlun->num_sectors, q); - q += 4; - } - - *q++ = 1; /* session number */ - *q++ = 0x14; /* ADR, control */ - *q++ = 0; /* track number */ - *q++ = 1; /* point */ - *q++ = 0; /* min */ - *q++ = 0; /* sec */ - *q++ = 0; /* frame */ - if (msf) { - *q++ = 0; - _lba_to_msf(q, 0); - q += 3; - } else { - memset(q, 0, 4); - q += 4; - } - - len = q - buf; - put_unaligned_be16(len - 2, buf); - - return len; -} - - -static void cd_data_to_raw(u8 *buf, int lba) -{ - /* sync bytes */ - buf[0] = 0x00; - memset(buf + 1, 0xff, 10); - buf[11] = 0x00; - buf += 12; - - /* MSF */ - _lba_to_msf(buf, lba); - buf[3] = 0x01; /* mode 1 data */ - buf += 4; - - /* data */ - buf += 2048; - - /* XXX: ECC not computed */ - memset(buf, 0, 288); -} - 
- /*-------------------------------------------------------------------------*/ static int do_read(struct fsg_common *common) @@ -857,25 +751,15 @@ static int do_read(struct fsg_common *common) unsigned int amount; unsigned int partial_page; ssize_t nread; - u32 transfer_request; #ifdef CONFIG_USB_MSC_PROFILING ktime_t start, diff; #endif - if (common->cmnd[0] == READ_CD) { - if (common->data_size_from_cmnd == 0) - return 0; - transfer_request = common->cmnd[9]; - } else - transfer_request = 0; - /* * Get the starting Logical Block Address and check that it's * not too big. */ - if (common->cmnd[0] == READ_CD) - lba = get_unaligned_be32(&common->cmnd[2]); - else if (common->cmnd[0] == READ_6) + if (common->cmnd[0] == READ_6) lba = get_unaligned_be24(&common->cmnd[1]); else { lba = get_unaligned_be32(&common->cmnd[2]); @@ -894,18 +778,10 @@ static int do_read(struct fsg_common *common) curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; return -EINVAL; } + file_offset = ((loff_t) lba) << 9; - if ((transfer_request & 0xf8) == 0xf8) { - file_offset = ((loff_t) lba) << 11; - - /* read all data, 2352 byte */ - amount_left = 2352; - } else { - file_offset = ((loff_t) lba) << 9; - - /* Carry out the file reads */ - amount_left = common->data_size_from_cmnd; - } + /* Carry out the file reads */ + amount_left = common->data_size_from_cmnd; if (unlikely(amount_left == 0)) return -EIO; /* No default reply */ @@ -956,14 +832,9 @@ static int do_read(struct fsg_common *common) #ifdef CONFIG_USB_MSC_PROFILING start = ktime_get(); #endif - if ((transfer_request & 0xf8) == 0xf8) - nread = vfs_read(curlun->filp, - ((char __user *)bh->buf) + 16, - amount, &file_offset_tmp); - else - nread = vfs_read(curlun->filp, - (char __user *)bh->buf, - amount, &file_offset_tmp); + nread = vfs_read(curlun->filp, + (char __user *)bh->buf, + amount, &file_offset_tmp); VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, (unsigned long long) file_offset, (int) nread); #ifdef CONFIG_USB_MSC_PROFILING @@ -1007,9 +878,6 @@ static int do_read(struct fsg_common *common) common->next_buffhd_to_fill = bh->next; } - if ((transfer_request & 0xf8) == 0xf8) - cd_data_to_raw(bh->buf, lba); - return -EIO; /* No default reply */ } @@ -1521,7 +1389,6 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) struct fsg_lun *curlun = common->curlun; int msf = common->cmnd[1] & 0x02; int start_track = common->cmnd[6]; - int format = (common->cmnd[9] & 0xC0) >> 6; u8 *buf = (u8 *)bh->buf; if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ @@ -1530,9 +1397,6 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) return -EINVAL; } - if (format == 2) - return _read_toc_raw(common, bh); - memset(buf, 0, 20); buf[1] = (20-2); /* TOC data length */ buf[2] = 1; /* First track number */ @@ -1601,7 +1465,7 @@ static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) memset(buf+2, 0, 10); /* None of the fields are changeable */ if (!changeable_values) { - buf[2] = 0x00; /* Write cache disable, */ + buf[2] = 0x04; /* Write cache enable, */ /* Read cache not disabled */ /* No cache retention priorities */ put_unaligned_be16(0xffff, &buf[4]); @@ -1746,42 +1610,6 @@ static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) return -EINVAL; } -static int do_reserve(struct fsg_common *common, struct fsg_buffhd *bh) -{ - int call_us_ret = -1; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/system/sbin:/system/bin:/system/xbin", - NULL, - }; - char *exec_path[2] = 
{"/system/bin/stop", "/system/bin/start" }; - char *argv_stop[] = { exec_path[0], "adbd", NULL, }; - char *argv_start[] = { exec_path[1], "adbd", NULL, }; - - if (common->cmnd[1] == ('h'&0x1f) && common->cmnd[2] == 't' - && common->cmnd[3] == 'c') { - /* No special options */ - switch (common->cmnd[5]) { - case 0x01: /* enable adbd */ - call_us_ret = call_usermodehelper(exec_path[1], - argv_start, envp, UMH_WAIT_PROC); - break; - case 0x02: /*disable adbd */ - call_us_ret = call_usermodehelper(exec_path[0], - argv_stop, envp, UMH_WAIT_PROC); - break; - default: - printk(KERN_DEBUG "Unknown hTC specific command..." - "(0x%2.2X)\n", common->cmnd[5]); - break; - } - } - printk(KERN_NOTICE "%s adb daemon from mass_storage %s(%d)\n", - (common->cmnd[5] == 0x01) ? "Enable" : - (common->cmnd[5] == 0x02) ? "Disable" : "Unknown", - (call_us_ret == 0) ? "DONE" : "FAIL", call_us_ret); - return 0; -} /*-------------------------------------------------------------------------*/ @@ -2125,8 +1953,6 @@ static int check_command(struct fsg_common *common, int cmnd_size, "but we got %d\n", name, cmnd_size, common->cmnd_size); cmnd_size = common->cmnd_size; - } else if (common->cmnd[0] == RESERVE) { - cmnd_size = common->cmnd_size; } else { common->phase_error = 1; return -EINVAL; @@ -2305,16 +2131,6 @@ static int do_scsi_command(struct fsg_common *common) reply = do_read(common); break; - case READ_CD: - common->data_size_from_cmnd = ((common->cmnd[6] << 16) | - (common->cmnd[7] << 8) | (common->cmnd[8])) << 9; - reply = check_command(common, 12, DATA_DIR_TO_HOST, - (0xf<<2) | (7<<7), 1, "READ CD"); - - if (reply == 0) - reply = do_read(common); - break; - case READ_CAPACITY: common->data_size_from_cmnd = 8; reply = check_command(common, 10, DATA_DIR_TO_HOST, @@ -2342,7 +2158,7 @@ static int do_scsi_command(struct fsg_common *common) common->data_size_from_cmnd = get_unaligned_be16(&common->cmnd[7]); reply = check_command(common, 10, DATA_DIR_TO_HOST, - (0xf<<6) | (1<<1), 1, + (7<<6) | (1<<1), 1, "READ TOC"); if (reply == 0) reply = do_read_toc(common, bh); @@ -2435,15 +2251,6 @@ static int do_scsi_command(struct fsg_common *common) reply = do_write(common); break; - case RESERVE: - common->data_size_from_cmnd = common->cmnd[4]; - reply = check_command(common, 10, DATA_DIR_TO_HOST, - (1<<1) | (0xf<<2) , 0, - "RESERVE(6)"); - if (reply == 0) - reply = do_reserve(common, bh); - break; - /* * Some mandatory commands that we recognize but don't implement. * They don't mean much in this setting. It's left as an exercise @@ -2452,6 +2259,7 @@ static int do_scsi_command(struct fsg_common *common) */ case FORMAT_UNIT: case RELEASE: + case RESERVE: case SEND_DIAGNOSTIC: /* Fall through */ @@ -2460,7 +2268,7 @@ static int do_scsi_command(struct fsg_common *common) common->data_size_from_cmnd = 0; sprintf(unknown, "Unknown x%02x", common->cmnd[0]); reply = check_command(common, common->cmnd_size, - DATA_DIR_UNKNOWN, 0xff, 0, unknown); + DATA_DIR_UNKNOWN, ~0, 0, unknown); if (reply == 0) { common->curlun->sense_data = SS_INVALID_COMMAND; reply = -EINVAL; @@ -3149,9 +2957,9 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, } } snprintf(common->inquiry_string, sizeof common->inquiry_string, - "%-8s%-16s%04x", cfg->vendor_name ? cfg->vendor_name : "Linux", + "%-8s%-16s%04x", cfg->vendor_name ?: "Linux", /* Assume product name dependent on the first LUN */ - cfg->product_name ? cfg->product_name : (common->luns->cdrom + cfg->product_name ?: (common->luns->cdrom ? 
"File-Stor Gadget" : "File-CD Gadget"), i); diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c index 2829231327d..88473d1a513 100644 --- a/drivers/usb/gadget/f_mtp.c +++ b/drivers/usb/gadget/f_mtp.c @@ -410,15 +410,6 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev, ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; - ep = usb_ep_autoconfig(cdev->gadget, out_desc); - if (!ep) { - DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); - return -ENODEV; - } - DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); - ep->driver_data = dev; /* claim the endpoint */ - dev->ep_out = ep; - ep = usb_ep_autoconfig(cdev->gadget, intr_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); @@ -504,7 +495,17 @@ static ssize_t mtp_read(struct file *fp, char __user *buf, } /* wait for a request to complete */ - ret = wait_event_interruptible(dev->read_wq, dev->rx_done); + ret = wait_event_interruptible(dev->read_wq, + dev->rx_done || dev->state != STATE_BUSY); + if (dev->state == STATE_CANCELED) { + r = -ECANCELED; + if (!dev->rx_done) + usb_ep_dequeue(dev->ep_out, req); + spin_lock_irq(&dev->lock); + dev->state = STATE_CANCELED; + spin_unlock_irq(&dev->lock); + goto done; + } if (ret < 0) { r = ret; usb_ep_dequeue(dev->ep_out, req); @@ -707,7 +708,8 @@ static void send_file_work(struct work_struct *data) { ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; r = -EIO; break; } @@ -759,7 +761,8 @@ static void receive_file_work(struct work_struct *data) ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } } @@ -771,7 +774,8 @@ static void receive_file_work(struct work_struct *data) DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; - dev->state = STATE_ERROR; + if (dev->state != STATE_OFFLINE) + dev->state = STATE_ERROR; break; } write_req = NULL; diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 5e1495097ec..459dbdead0a 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c @@ -541,7 +541,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); if (!req) - goto err; + goto err_req; req->complete = pn_rx_complete; fp->out_reqv[i] = req; @@ -550,14 +550,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f) /* Outgoing USB requests */ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); if (!fp->in_req) - goto err; + goto err_req; INFO(cdev, "USB CDC Phonet function\n"); INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, fp->out_ep->name, fp->in_ep->name); return 0; +err_req: + for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) + usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); err: + if (fp->out_ep) fp->out_ep->driver_data = NULL; if (fp->in_ep) diff --git a/drivers/usb/gadget/f_projector.c b/drivers/usb/gadget/f_projector.c index 557f9f5a188..c1f14051b1c 100644 --- a/drivers/usb/gadget/f_projector.c +++ b/drivers/usb/gadget/f_projector.c @@ -21,9 +21,18 @@ #include #include #include +#include +#include +#include #include - #include +#include +#include + +#ifdef DUMMY_DISPLAY_MODE +#include "f_projector_debug.h" +#endif + #ifdef DBG #undef DBG #endif @@ -34,25 +43,41 @@ #define 
DBG(x...) printk(KERN_INFO x) #endif +#ifdef VDBG +#undef VDBG +#endif + +#if 1 +#define VDBG(x...) do {} while (0) +#else +#define VDBG(x...) printk(KERN_INFO x) +#endif + + /*16KB*/ #define TXN_MAX 16384 #define RXN_MAX 4096 -/* number of rx and tx requests to allocate */ +/* number of rx requests to allocate */ #define PROJ_RX_REQ_MAX 4 -#if 0 -#define PROJ_TX_REQ_MAX 115 /*for resolution 1280*736*2 / 16k */ -#define PROJ_TX_REQ_MAX 75 /*for resolution 1024*600*2 / 16k */ -#define PROJ_TX_REQ_MAX 56 /*for 8k resolution 480*800*2 / 16k */ -#endif + +#define DEFAULT_PROJ_WIDTH 480 +#define DEFAULT_PROJ_HEIGHT 800 + +#define TOUCH_WIDTH 480 +#define TOUCH_HEIGHT 800 #define BITSPIXEL 16 #define PROJECTOR_FUNCTION_NAME "projector" +#define htc_mode_info(fmt, args...) \ + printk(KERN_INFO "[htc_mode] " pr_fmt(fmt), ## args) + static struct wake_lock prj_idle_wake_lock; static int keypad_code[] = {KEY_WAKEUP, 0, 0, 0, KEY_HOME, KEY_MENU, KEY_BACK}; -static const char shortname[] = "android_projector"; +static const char cand_shortname[] = "htc_cand"; +static const char htcmode_shortname[] = "htcmode"; struct projector_dev { struct usb_function function; @@ -62,6 +87,9 @@ struct projector_dev { struct usb_ep *ep_in; struct usb_ep *ep_out; + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + int online; int error; @@ -82,7 +110,20 @@ struct projector_dev { struct input_dev *keypad_input; struct input_dev *touch_input; char *fbaddr; - struct platform_device *pdev; + + atomic_t cand_online; + struct switch_dev cand_sdev; + struct switch_dev htcmode_sdev; + struct work_struct notifier_work; + struct work_struct htcmode_notifier_work; + + struct workqueue_struct *wq_display; + struct work_struct send_fb_work; + int start_send_fb; + + /* HTC Mode Protocol Info */ + struct htcmode_protocol *htcmode_proto; + u8 is_htcmode; }; static struct usb_interface_descriptor projector_interface_desc = { @@ -155,8 +196,31 @@ static struct usb_gadget_strings *projector_strings[] = { &projector_string_table, NULL, }; -static struct projector_dev _projector_dev; -struct device prj_dev; + +static struct projector_dev *projector_dev = NULL; + +struct size { + int w; + int h; +}; + +enum { + NOT_ON_AUTOBOT, + DOCK_ON_AUTOBOT, + HTC_MODE_RUNNING +}; +/* the value of htc_mode_status should be one of above status */ +static atomic_t htc_mode_status = ATOMIC_INIT(0); + +static void usb_setup_andriod_projector(struct work_struct *work); +static DECLARE_WORK(conf_usb_work, usb_setup_andriod_projector); + + +static void usb_setup_andriod_projector(struct work_struct *work) +{ + android_switch_htc_mode(); + htc_mode_enable(1); +} static inline struct projector_dev *proj_func_to_dev(struct usb_function *f) { @@ -188,21 +252,6 @@ static void projector_request_free(struct usb_request *req, struct usb_ep *ep) } } -static inline int _lock(atomic_t *excl) -{ - if (atomic_inc_return(excl) == 1) { - return 0; - } else { - atomic_dec(excl); - return -1; - } -} - -static inline void _unlock(atomic_t *excl) -{ - atomic_dec(excl); -} - /* add a request to the tail of a list */ static void proj_req_put(struct projector_dev *dev, struct list_head *head, struct usb_request *req) @@ -231,20 +280,20 @@ static struct usb_request *proj_req_get(struct projector_dev *dev, struct list_h return req; } -static void projector_queue_out(struct projector_dev *ctxt) +static void projector_queue_out(struct projector_dev *dev) { int ret; struct usb_request *req; /* if we have idle read requests, get them queued */ - while ((req = 
proj_req_get(ctxt, &ctxt->rx_idle))) { + while ((req = proj_req_get(dev, &dev->rx_idle))) { req->length = RXN_MAX; - DBG("%s: queue %p\n", __func__, req); - ret = usb_ep_queue(ctxt->ep_out, req, GFP_ATOMIC); + VDBG("%s: queue %p\n", __func__, req); + ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC); if (ret < 0) { - DBG("projector: failed to queue out req (%d)\n", ret); - ctxt->error = 1; - proj_req_put(ctxt, &ctxt->rx_idle, req); + VDBG("projector: failed to queue out req (%d)\n", ret); + dev->error = 1; + proj_req_put(dev, &dev->rx_idle, req); break; } } @@ -305,10 +354,10 @@ static void projector_send_touch_event(struct projector_dev *dev, } /* key code: 4 -> home, 5-> menu, 6 -> back, 0 -> system wake */ -static void projector_send_Key_event(struct projector_dev *ctxt, +static void projector_send_Key_event(struct projector_dev *dev, int iKeycode) { - struct input_dev *kdev = ctxt->keypad_input; + struct input_dev *kdev = dev->keypad_input; printk(KERN_INFO "%s keycode %d\n", __func__, iKeycode); /* ics will use default Generic.kl to translate linux keycode WAKEUP @@ -325,60 +374,146 @@ static void projector_send_Key_event(struct projector_dev *ctxt, input_sync(kdev); } -static void send_fb(struct projector_dev *ctxt) +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) +extern char *get_fb_addr(void); +#endif + +static void send_fb(struct projector_dev *dev) { struct usb_request *req; - char *frame; int xfer; - int count = ctxt->framesize; + int count = dev->framesize; +#ifdef DUMMY_DISPLAY_MODE + unsigned short *frame; +#else + char *frame; +#endif - if (msmfb_get_fb_area()) - frame = (ctxt->fbaddr + ctxt->framesize); - else - frame = ctxt->fbaddr; + +#ifdef DUMMY_DISPLAY_MODE + frame = test_frame; +#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + frame = get_fb_addr(); +#else + if (msmfb_get_fb_area()) + frame = (dev->fbaddr + dev->framesize); + else + frame = dev->fbaddr; +#endif + if (frame == NULL) + return; while (count > 0) { - req = proj_req_get(ctxt, &ctxt->tx_idle); + req = proj_req_get(dev, &dev->tx_idle); if (req) { xfer = count > TXN_MAX? TXN_MAX : count; req->length = xfer; memcpy(req->buf, frame, xfer); - if (usb_ep_queue(ctxt->ep_in, req, GFP_ATOMIC) < 0) { - proj_req_put(ctxt, &ctxt->tx_idle, req); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); printk(KERN_WARNING "%s: failed to queue req %p\n", __func__, req); break; } + + count -= xfer; +#ifdef DUMMY_DISPLAY_MODE + frame += xfer/2; +#else + frame += xfer; +#endif + } else { + printk(KERN_ERR "send_fb: no req to send\n"); + break; + } + } +} + +static void send_fb2(struct projector_dev *dev) +{ + struct usb_request *req; + int xfer; + +#ifdef DUMMY_DISPLAY_MODE + unsigned short *frame; + int count = dev->framesize; +#else + char *frame; + int count = dev->htcmode_proto->server_info.width * + dev->htcmode_proto->server_info.height * (BITSPIXEL / 8); +#endif + +#ifdef DUMMY_DISPLAY_MODE + frame = test_frame; +#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + frame = get_fb_addr(); +#else + if (msmfb_get_fb_area()) + frame = (dev->fbaddr + dev->framesize); + else + frame = dev->fbaddr; +#endif + if (frame == NULL) + return; + + while (count > 0 && dev->online) { + + while (!(req = proj_req_get(dev, &dev->tx_idle))) { + msleep(1); + + if (!dev->online) + break; + } + + if (req) { + xfer = count > TXN_MAX? 
TXN_MAX : count; + req->length = xfer; + memcpy(req->buf, frame, xfer); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req" + " %p\n", __func__, req); + break; + } count -= xfer; +#ifdef DUMMY_DISPLAY_MODE + frame += xfer/2; +#else frame += xfer; +#endif } else { printk(KERN_ERR "send_fb: no req to send\n"); break; } } } -static void send_info(struct projector_dev *ctxt) + +void send_fb_do_work(struct work_struct *work) +{ + struct projector_dev *dev = projector_dev; + while (dev->start_send_fb) { + send_fb2(dev); + msleep(1); + } +} + + + +static void send_info(struct projector_dev *dev) { struct usb_request *req; - req = proj_req_get(ctxt, &ctxt->tx_idle); + req = proj_req_get(dev, &dev->tx_idle); if (req) { req->length = 20; memcpy(req->buf, "okay", 4); - memcpy(req->buf + 4, &ctxt->bitsPixel, 4); - #if defined(CONFIG_MACH_PARADISE) - if (machine_is_paradise()) { - ctxt->framesize = 320 * 480 * 2; - printk(KERN_INFO "send_info: framesize %d\n", - ctxt->framesize); - } - #endif - memcpy(req->buf + 8, &ctxt->framesize, 4); - memcpy(req->buf + 12, &ctxt->width, 4); - memcpy(req->buf + 16, &ctxt->height, 4); - if (usb_ep_queue(ctxt->ep_in, req, GFP_ATOMIC) < 0) { - proj_req_put(ctxt, &ctxt->tx_idle, req); + memcpy(req->buf + 4, &dev->bitsPixel, 4); + memcpy(req->buf + 8, &dev->framesize, 4); + memcpy(req->buf + 12, &dev->width, 4); + memcpy(req->buf + 16, &dev->height, 4); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); printk(KERN_WARNING "%s: failed to queue req %p\n", __func__, req); } @@ -386,81 +521,263 @@ static void send_info(struct projector_dev *ctxt) printk(KERN_INFO "%s: no req to send\n", __func__); } -static void projector_get_msmfb(struct projector_dev *ctxt) + +static void send_server_info(struct projector_dev *dev) +{ + struct usb_request *req; + + req = proj_req_get(dev, &dev->tx_idle); + if (req) { + req->length = sizeof(struct msm_server_info); + memcpy(req->buf, &dev->htcmode_proto->server_info, req->length); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req %p\n", + __func__, req); + } + } else { + printk(KERN_INFO "%s: no req to send\n", __func__); + } +} + +static void send_server_nonce(struct projector_dev *dev) +{ + struct usb_request *req; + int nonce[NONCE_SIZE]; + int i = 0; + + req = proj_req_get(dev, &dev->tx_idle); + if (req) { + req->length = NONCE_SIZE * sizeof(int); + for (i = 0; i < NONCE_SIZE; i++) + nonce[i] = get_random_int(); + memcpy(req->buf, nonce, req->length); + if (usb_ep_queue(dev->ep_in, req, GFP_ATOMIC) < 0) { + proj_req_put(dev, &dev->tx_idle, req); + printk(KERN_WARNING "%s: failed to queue req %p\n", + __func__, req); + } + } else { + printk(KERN_INFO "%s: no req to send\n", __func__); + } +} + +struct size rotate(struct size v) +{ + struct size r; + r.w = v.h; + r.h = v.w; + return r; +} + +static struct size get_projection_size(struct projector_dev *dev, struct msm_client_info *client_info) +{ + int server_width = 0; + int server_height = 0; + struct size client; + struct size server; + struct size ret; + int perserve_aspect_ratio = client_info->display_conf & (1 << 0); + int server_orientation = 0; + int client_orientation = (client_info->width > client_info->height); + int align_w = 0; + + server_width = dev->width; + server_height = dev->height; + + server_orientation = (server_width > server_height); + + 
printk(KERN_INFO "%s(): perserve_aspect_ratio= %d\n", __func__, perserve_aspect_ratio); + + client.w = client_info->width; + client.h = client_info->height; + server.w = server_width; + server.h = server_height; + + if (server_orientation != client_orientation) + client = rotate(client); + + align_w = client.h * server.w > server.h * client.w; + + if (perserve_aspect_ratio) { + if (align_w) { + ret.w = client.w; + ret.h = (client.w * server.h) / server.w; + } else { + ret.w = (client.h * server.w) / server.h; + ret.h = client.h; + } + + ret.w = round_down(ret.w, 32); + } else { + ret = client; + } + + printk(KERN_INFO "projector size(w=%d, h=%d)\n", ret.w, ret.h); + + return ret; +} + + +static void projector_get_msmfb(struct projector_dev *dev) { struct msm_fb_info fb_info; msmfb_get_var(&fb_info); - ctxt->bitsPixel = BITSPIXEL; - ctxt->width = fb_info.xres; - ctxt->height = fb_info.yres; - ctxt->fbaddr = fb_info.fb_addr; - printk(KERN_INFO "projector: width %d, height %d\n", - fb_info.xres, fb_info.yres); + dev->bitsPixel = BITSPIXEL; + dev->width = fb_info.xres; + dev->height = fb_info.yres; +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + dev->fbaddr = get_fb_addr(); +#else + dev->fbaddr = fb_info.fb_addr; +#endif + dev->framesize = dev->width * dev->height * (dev->bitsPixel / 8); + printk(KERN_INFO "projector: width %d, height %d framesize %d, %p\n", + fb_info.xres, fb_info.yres, dev->framesize, dev->fbaddr); +} + +/* + * Handle HTC Mode specific messages and return 1 if message has been handled + */ +static int projector_handle_htcmode_msg(struct projector_dev *dev, struct usb_request *req) +{ + unsigned char *data = req->buf; + int handled = 1; + struct size projector_size; + + if ((data[0] == CLIENT_INFO_MESGID) && (req->actual == sizeof(struct msm_client_info))) { + memcpy(&dev->htcmode_proto->client_info, req->buf, sizeof(struct msm_client_info)); + + projector_size = get_projection_size(dev, &dev->htcmode_proto->client_info); + projector_get_msmfb(dev); + + dev->htcmode_proto->server_info.mesg_id = SERVER_INFO_MESGID; + dev->htcmode_proto->server_info.width = projector_size.w; + dev->htcmode_proto->server_info.height = projector_size.h; + dev->htcmode_proto->server_info.pixel_format = PIXEL_FORMAT_RGB565; + dev->htcmode_proto->server_info.ctrl_conf = CTRL_CONF_TOUCH_EVENT_SUPPORTED | + CTRL_CONF_NUM_SIMULTANEOUS_TOUCH; + send_server_info(dev); - ctxt->framesize = (ctxt->width)*(ctxt->height)*2; - printk(KERN_INFO "projector: width %d, height %d %d\n", - fb_info.xres, fb_info.yres, ctxt->framesize); + if (dev->htcmode_proto->version >= 0x0005) + send_server_nonce(dev); + } else if (dev->htcmode_proto->version >= 0x0005 && + data[0] == AUTH_CLIENT_NONCE_MESGID) { + /* TODO: Future extension */ + } else if (!strncmp("startfb", data, 7)) { + dev->start_send_fb = true; + queue_work(dev->wq_display, &dev->send_fb_work); + + dev->frame_count++; + + if (atomic_inc_return(&htc_mode_status) != HTC_MODE_RUNNING) + atomic_dec(&htc_mode_status); + + htc_mode_info("startfb current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + schedule_work(&dev->htcmode_notifier_work); + + /* 30s send system wake code */ + if (dev->frame_count == 30 * 30) { + projector_send_Key_event(dev, 0); + dev->frame_count = 0; + } + } else if (!strncmp("endfb", data, 5)) { + dev->start_send_fb = false; + if (atomic_dec_return(&htc_mode_status) != DOCK_ON_AUTOBOT) + atomic_inc(&htc_mode_status); + htc_mode_info("endfb current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + 
schedule_work(&dev->htcmode_notifier_work); + } else if (!strncmp("startcand", data, 9)) { + atomic_set(&dev->cand_online, 1); + htc_mode_info("startcand %d\n", atomic_read(&dev->cand_online)); + + schedule_work(&dev->notifier_work); + } else if (!strncmp("endcand", data, 7)) { + atomic_set(&dev->cand_online, 0); + htc_mode_info("endcand %d\n", atomic_read(&dev->cand_online)); + + schedule_work(&dev->notifier_work); + } else { + handled = 0; + } + + return handled; } static void projector_complete_in(struct usb_ep *ep, struct usb_request *req) { - struct projector_dev *dev = &_projector_dev; + struct projector_dev *dev = projector_dev; proj_req_put(dev, &dev->tx_idle, req); } static void projector_complete_out(struct usb_ep *ep, struct usb_request *req) { - struct projector_dev *ctxt = &_projector_dev; + struct projector_dev *dev = projector_dev; unsigned char *data = req->buf; int mouse_data[3]; int i; - DBG("%s: status %d, %d bytes\n", __func__, + int handled = 0; + VDBG("%s: status %d, %d bytes\n", __func__, req->status, req->actual); if (req->status != 0) { - ctxt->error = 1; - proj_req_put(ctxt, &ctxt->rx_idle, req); + dev->error = 1; + proj_req_put(dev, &dev->rx_idle, req); return ; } - /* for mouse event type, 1 :move, 2:down, 3:up */ - mouse_data[0] = *((int *)(req->buf)); - - if (!strncmp("init", data, 4)) { - if (!ctxt->init_done) { - projector_get_msmfb(ctxt); - ctxt->init_done = 1; - } - send_info(ctxt); - /* system wake code */ - projector_send_Key_event(ctxt, 0); - } else if (*data == ' ') { - send_fb(ctxt); - ctxt->frame_count++; - /* 30s send system wake code */ - if (ctxt->frame_count == 30 * 30) { - projector_send_Key_event(ctxt, 0); - ctxt->frame_count = 0; - } - } else if (mouse_data[0] > 0) { - if (mouse_data[0] < 4) { - for (i = 0; i < 3; i++) - mouse_data[i] = *(((int *)(req->buf))+i); - projector_send_touch_event(ctxt, - mouse_data[0], mouse_data[1], mouse_data[2]); - } else { - projector_send_Key_event(ctxt, mouse_data[0]); - printk(KERN_INFO "projector: Key command data %02x, keycode %d\n", - *((char *)(req->buf)), mouse_data[0]); - } - } else if (mouse_data[0] != 0) - printk(KERN_ERR "projector: Unknow command data %02x, mouse %d,%d,%d\n", - *((char *)(req->buf)), mouse_data[0], mouse_data[1], mouse_data[2]); - - proj_req_put(ctxt, &ctxt->rx_idle, req); - projector_queue_out(ctxt); + if (dev->is_htcmode) + handled = projector_handle_htcmode_msg(dev, req); + + if (!handled) { + /* for mouse event type, 1 :move, 2:down, 3:up */ + mouse_data[0] = *((int *)(req->buf)); + + if (!strncmp("init", data, 4)) { + + dev->init_done = 1; + dev->bitsPixel = BITSPIXEL; + dev->width = DEFAULT_PROJ_WIDTH; + dev->height = DEFAULT_PROJ_HEIGHT; + dev->framesize = dev->width * dev->height * (BITSPIXEL / 8); + + send_info(dev); + /* system wake code */ + projector_send_Key_event(dev, 0); + + atomic_set( &htc_mode_status, HTC_MODE_RUNNING); + htc_mode_info("init current htc_mode_status = %d\n", + atomic_read(&htc_mode_status)); + schedule_work(&dev->htcmode_notifier_work); + } else if (*data == ' ') { + send_fb(dev); + dev->frame_count++; + /* 30s send system wake code */ + if (dev->frame_count == 30 * 30) { + projector_send_Key_event(dev, 0); + dev->frame_count = 0; + } + } else if (mouse_data[0] > 0) { + if (mouse_data[0] < 4) { + for (i = 0; i < 3; i++) + mouse_data[i] = *(((int *)(req->buf))+i); + projector_send_touch_event(dev, + mouse_data[0], mouse_data[1], mouse_data[2]); + } else { + projector_send_Key_event(dev, mouse_data[0]); + printk(KERN_INFO "projector: Key command data 
%02x, keycode %d\n", + *((char *)(req->buf)), mouse_data[0]); + } + } else if (mouse_data[0] != 0) + printk(KERN_ERR "projector: Unknow command data %02x, mouse %d,%d,%d\n", + *((char *)(req->buf)), mouse_data[0], mouse_data[1], mouse_data[2]); + } + proj_req_put(dev, &dev->rx_idle, req); + projector_queue_out(dev); wake_lock_timeout(&prj_idle_wake_lock, HZ / 2); } @@ -529,7 +846,7 @@ projector_function_bind(struct usb_configuration *c, struct usb_function *f) int ret; dev->cdev = cdev; - DBG("projector_function_bind dev: %p\n", dev); + DBG("%s\n", __func__); /* allocate interface ID(s) */ id = usb_interface_id(c, f); @@ -557,70 +874,53 @@ projector_function_bind(struct usb_configuration *c, struct usb_function *f) return 0; } -static void -projector_function_unbind(struct usb_configuration *c, struct usb_function *f) -{ - struct projector_dev *dev = proj_func_to_dev(f); - struct usb_request *req; - - while ((req = proj_req_get(dev, &dev->tx_idle))) - projector_request_free(req, dev->ep_in); - while ((req = proj_req_get(dev, &dev->rx_idle))) - projector_request_free(req, dev->ep_out); - - dev->online = 0; - dev->error = 1; - if (dev->touch_input) - input_unregister_device(dev->touch_input); - if (dev->keypad_input) - input_unregister_device(dev->keypad_input); -} static int projector_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { - struct projector_dev *dev = proj_func_to_dev(f); + struct projector_dev *dev = proj_func_to_dev(f); struct usb_composite_dev *cdev = f->config->cdev; + struct android_dev *adev = _android_dev; + struct android_usb_function *af; int ret; DBG("%s intf: %d alt: %d\n", __func__, intf, alt); - ret = usb_ep_enable(dev->ep_in, - ep_choose(cdev->gadget, + + dev->in = ep_choose(cdev->gadget, &projector_highspeed_in_desc, - &projector_fullspeed_in_desc)); + &projector_fullspeed_in_desc); + + dev->out = ep_choose(cdev->gadget, + &projector_highspeed_out_desc, + &projector_fullspeed_out_desc); + + ret = usb_ep_enable(dev->ep_in, dev->in); if (ret) return ret; - ret = usb_ep_enable(dev->ep_out, - ep_choose(cdev->gadget, - &projector_highspeed_out_desc, - &projector_fullspeed_out_desc)); + + ret = usb_ep_enable(dev->ep_out,dev->out); if (ret) { usb_ep_disable(dev->ep_in); return ret; } - dev->online = 1; + + dev->online = 0; + list_for_each_entry(af, &adev->enabled_functions, enabled_list) { + if (!strcmp(af->name, f->name)) { + dev->online = 1; + break; + } + } projector_queue_out(dev); return 0; } -static void projector_function_disable(struct usb_function *f) -{ - struct projector_dev *dev = proj_func_to_dev(f); - - DBG("projector_function_disable\n"); - dev->online = 0; - dev->error = 1; - usb_ep_disable(dev->ep_in); - usb_ep_disable(dev->ep_out); - - VDBG(dev->cdev, "%s disabled\n", dev->function.name); -} static int projector_touch_init(struct projector_dev *dev) { - int x = dev->width; - int y = dev->height; + int x = TOUCH_WIDTH; + int y = TOUCH_HEIGHT; int ret = 0; struct input_dev *tdev = dev->touch_input; @@ -639,33 +939,6 @@ static int projector_touch_init(struct projector_dev *dev) set_bit(BTN_2, tdev->keybit); set_bit(EV_ABS, tdev->evbit); - if (x == 0) { - printk(KERN_ERR "%s: x=0\n", __func__); - #if defined(CONFIG_ARCH_QSD8X50) - x = 480; - #elif defined(CONFIG_MACH_PARADISE) - if (machine_is_paradise()) - x = 240; - else - x = 320; - #else - x = 320; - #endif - } - - if (y == 0) { - printk(KERN_ERR "%s: y=0\n", __func__); - #if defined(CONFIG_ARCH_QSD8X50) - y = 800; - #elif defined(CONFIG_MACH_PARADISE) - if (machine_is_paradise()) 
- y = 400; - else - y = 480; - #else - y = 480; - #endif - } /* Set input parameters boundary. */ input_set_abs_params(tdev, ABS_X, 0, x, 0, 0); input_set_abs_params(tdev, ABS_Y, 0, y, 0, 0); @@ -750,13 +1023,128 @@ static ssize_t show_enable(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(enable, 0664, show_enable, store_enable); #endif -static int projector_bind_config(struct usb_configuration *c) + +static void cand_online_notify(struct work_struct *w) +{ + struct projector_dev *dev = container_of(w, + struct projector_dev, notifier_work); + DBG("%s\n", __func__); + switch_set_state(&dev->cand_sdev, atomic_read(&dev->cand_online)); +} + +static void htcmode_status_notify(struct work_struct *w) +{ + struct projector_dev *dev = container_of(w, + struct projector_dev, htcmode_notifier_work); + DBG("%s\n", __func__); + switch_set_state(&dev->htcmode_sdev, atomic_read(&htc_mode_status)); +} + +/* + * 1: enable; 0: disable + */ +void htc_mode_enable(int enable) +{ + htc_mode_info("%s = %d, current htc_mode_status = %d\n", + __func__, enable, atomic_read(&htc_mode_status)); + + if (enable) + atomic_set(&htc_mode_status, DOCK_ON_AUTOBOT); + else + atomic_set(&htc_mode_status, NOT_ON_AUTOBOT); + + htcmode_status_notify(&projector_dev->htcmode_notifier_work); +} + +int check_htc_mode_status(void) +{ + return atomic_read(&htc_mode_status); +} + +static ssize_t print_cand_switch_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "%s\n", cand_shortname); +} + +static ssize_t print_cand_switch_state(struct switch_dev *cand_sdev, char *buf) +{ + struct projector_dev *dev = container_of(cand_sdev, + struct projector_dev, cand_sdev); + return sprintf(buf, "%s\n", (atomic_read(&dev->cand_online) ? + "online" : "offline")); +} + +static ssize_t print_htcmode_switch_name(struct switch_dev *sdev, char *buf) +{ + return sprintf(buf, "%s\n", htcmode_shortname); +} + +static ssize_t print_htcmode_switch_state(struct switch_dev *htcmode_sdev, char *buf) +{ + return sprintf(buf, "%s\n", (atomic_read(&htc_mode_status)==HTC_MODE_RUNNING ? + "projecting" : (atomic_read(&htc_mode_status)==DOCK_ON_AUTOBOT ? 
"online" : "offline"))); +} + + +static void projector_function_disable(struct usb_function *f) +{ + struct projector_dev *dev = proj_func_to_dev(f); + + DBG("%s\n", __func__); + + dev->start_send_fb = false; + dev->online = 0; + dev->error = 1; + usb_ep_disable(dev->ep_in); + usb_ep_disable(dev->ep_out); + + atomic_set(&dev->cand_online, 0); + schedule_work(&dev->notifier_work); + + VDBG(dev->cdev, "%s disabled\n", dev->function.name); +} + + +static void +projector_function_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct projector_dev *dev = proj_func_to_dev(f); + struct usb_request *req; + + DBG("%s\n", __func__); + + destroy_workqueue(dev->wq_display); + + while ((req = proj_req_get(dev, &dev->tx_idle))) + projector_request_free(req, dev->ep_in); + while ((req = proj_req_get(dev, &dev->rx_idle))) + projector_request_free(req, dev->ep_out); + + dev->online = 0; + dev->error = 1; + dev->is_htcmode = 0; + + if (dev->touch_input) { + input_unregister_device(dev->touch_input); + input_free_device(dev->touch_input); + } + if (dev->keypad_input) { + input_unregister_device(dev->keypad_input); + input_free_device(dev->keypad_input); + } + +} + + +static int projector_bind_config(struct usb_configuration *c, + struct htcmode_protocol *config) { - struct projector_dev *dev = &_projector_dev; + struct projector_dev *dev; struct msm_fb_info fb_info; int ret = 0; - printk(KERN_INFO "projector_bind_config\n"); + DBG("%s\n", __func__); + dev = projector_dev; if (projector_string_defs[0].id == 0) { ret = usb_string_id(c->cdev); @@ -780,40 +1168,147 @@ static int projector_bind_config(struct usb_configuration *c) dev->bitsPixel = BITSPIXEL; dev->width = fb_info.xres; dev->height = fb_info.yres; +#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) + dev->fbaddr = get_fb_addr(); +#else dev->fbaddr = fb_info.fb_addr; - +#endif dev->rx_req_count = PROJ_RX_REQ_MAX; dev->tx_req_count = (dev->width * dev->height * 2 / TXN_MAX) + 1; printk(KERN_INFO "[USB][Projector]resolution: %u*%u" ", rx_cnt: %u, tx_cnt:%u\n", dev->width, dev->height, dev->rx_req_count, dev->tx_req_count); - if (projector_touch_init(dev) < 0) - goto err; + goto err_free; if (projector_keypad_init(dev) < 0) - goto err; + goto err_free; + spin_lock_init(&dev->lock); + INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->tx_idle); ret = usb_add_function(c, &dev->function); if (ret) - goto err; + goto err_free; + + dev->wq_display = create_singlethread_workqueue("projector_mode"); + if (!dev->wq_display) + goto err_free_wq; + + workqueue_set_max_active(dev->wq_display,1); + + INIT_WORK(&dev->send_fb_work, send_fb_do_work); + + dev->init_done = 0; + dev->frame_count = 0; + dev->is_htcmode = 0; + dev->htcmode_proto = config; + dev->htcmode_proto->server_info.height = DEFAULT_PROJ_HEIGHT; + dev->htcmode_proto->server_info.width = DEFAULT_PROJ_WIDTH; + dev->htcmode_proto->client_info.display_conf = 0; return 0; -err: - printk(KERN_ERR "projector gadget driver failed to initialize\n"); + +err_free_wq: + destroy_workqueue(dev->wq_display); +err_free: + printk(KERN_ERR "projector gadget driver failed to initialize, err=%d\n", ret); return ret; } static int projector_setup(void) { - struct projector_dev *dev = &_projector_dev; - dev->init_done = 0; - dev->frame_count = 0; - wake_lock_init(&prj_idle_wake_lock, WAKE_LOCK_IDLE, "prj_idle_lock"); + struct projector_dev *dev; + int ret = 0; - spin_lock_init(&dev->lock); - INIT_LIST_HEAD(&dev->rx_idle); - INIT_LIST_HEAD(&dev->tx_idle); + DBG("%s\n", __func__); + dev = 
kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + projector_dev = dev; + + INIT_WORK(&dev->notifier_work, cand_online_notify); + INIT_WORK(&dev->htcmode_notifier_work, htcmode_status_notify); + + dev->cand_sdev.name = cand_shortname; + dev->cand_sdev.print_name = print_cand_switch_name; + dev->cand_sdev.print_state = print_cand_switch_state; + ret = switch_dev_register(&dev->cand_sdev); + if (ret < 0) { + printk(KERN_ERR "usb cand_sdev switch_dev_register register fail\n"); + goto err_free; + } + + dev->htcmode_sdev.name = htcmode_shortname; + dev->htcmode_sdev.print_name = print_htcmode_switch_name; + dev->htcmode_sdev.print_state = print_htcmode_switch_state; + ret = switch_dev_register(&dev->htcmode_sdev); + if (ret < 0) { + printk(KERN_ERR "usb htcmode_sdev switch_dev_register register fail\n"); + goto err_unregister_cand; + } + + wake_lock_init(&prj_idle_wake_lock, WAKE_LOCK_IDLE, "prj_idle_lock"); return 0; + +err_unregister_cand: + switch_dev_unregister(&dev->cand_sdev); +err_free: + kfree(dev); + printk(KERN_ERR "projector gadget driver failed to initialize, err=%d\n", ret); + return ret; + } +static void projector_cleanup(void) +{ + struct projector_dev *dev; + + dev = projector_dev; + + switch_dev_unregister(&dev->cand_sdev); + switch_dev_unregister(&dev->htcmode_sdev); + + kfree(dev); +} + +#ifdef CONFIG_USB_ANDROID_PROJECTOR_HTC_MODE +static int projector_ctrlrequest(struct usb_composite_dev *cdev, + const struct usb_ctrlrequest *ctrl) +{ + int value = -EOPNOTSUPP; + + if (((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) && + (ctrl->bRequest == HTC_MODE_CONTROL_REQ)) { + if (check_htc_mode_status() == NOT_ON_AUTOBOT) + schedule_work(&conf_usb_work); + else { + if (projector_dev) { + projector_dev->htcmode_proto->version = le16_to_cpu(ctrl->wValue); + /* + * 0x0034 is for Autobot. It is not a correct HTC mode version. + */ + if (projector_dev->htcmode_proto->version == 0x0034) + projector_dev->htcmode_proto->version = 0x0003; + projector_dev->is_htcmode = 1; + printk(KERN_INFO "HTC Mode version = 0x%04X\n", projector_dev->htcmode_proto->version); + } else { + printk(KERN_ERR "%s: projector_dev is NULL!!", __func__); + } + } + value = 0; + } + + if (value >= 0) { + cdev->req->zero = 0; + cdev->req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); + if (value < 0) + printk(KERN_ERR "%s setup response queue error\n", + __func__); + } + + return value; +} +#endif diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c new file mode 100644 index 00000000000..cbcf5ac76b3 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet.c @@ -0,0 +1,1057 @@ +/* + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include "u_rmnet.h" +#include "gadget_chips.h" + +#define RMNET_NOTIFY_INTERVAL 5 +#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) + +struct rmnet_descs { + struct usb_endpoint_descriptor *in; + struct usb_endpoint_descriptor *out; + struct usb_endpoint_descriptor *notify; +}; + +#define ACM_CTRL_DTR (1 << 0) + +/* TODO: use separate structures for data and + * control paths + */ +struct f_rmnet { + struct grmnet port; + int ifc_id; + u8 port_num; + atomic_t online; + atomic_t ctrl_online; + struct usb_composite_dev *cdev; + + spinlock_t lock; + + /* usb descriptors */ + struct rmnet_descs fs; + struct rmnet_descs hs; + + /* usb eps*/ + struct usb_ep *notify; + struct usb_endpoint_descriptor *notify_desc; + struct usb_request *notify_req; + + /* control info */ + struct list_head cpkt_resp_q; + atomic_t notify_count; + unsigned long cpkts_len; +}; + +#define NR_RMNET_PORTS 1 +static unsigned int nr_rmnet_ports; +static unsigned int no_ctrl_smd_ports; +static unsigned int no_ctrl_hsic_ports; +static unsigned int no_data_bam_ports; +static unsigned int no_data_bam2bam_ports; +static unsigned int no_data_hsic_ports; +static struct rmnet_ports { + enum transport_type data_xport; + enum transport_type ctrl_xport; + unsigned data_xport_num; + unsigned ctrl_xport_num; + unsigned port_num; + struct f_rmnet *port; +} rmnet_ports[NR_RMNET_PORTS]; + +static struct usb_interface_descriptor rmnet_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct usb_endpoint_descriptor rmnet_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), + .bInterval = 1 << RMNET_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_interface_desc, + (struct usb_descriptor_header *) &rmnet_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE), + .bInterval = RMNET_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = 
__constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_interface_desc, + (struct usb_descriptor_header *) &rmnet_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_string_defs[] = { + [0].s = "RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_string_defs, +}; + +static struct usb_gadget_strings *rmnet_strings[] = { + &rmnet_string_table, + NULL, +}; + +/* ------- misc functions --------------------*/ + +static inline struct f_rmnet *func_to_rmnet(struct usb_function *f) +{ + return container_of(f, struct f_rmnet, port.func); +} + +static inline struct f_rmnet *port_to_rmnet(struct grmnet *r) +{ + return container_of(r, struct f_rmnet, port); +} + +static struct usb_request * +frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, flags); + if (!req) + return ERR_PTR(-ENOMEM); + + req->buf = kmalloc(len, flags); + if (!req->buf) { + usb_ep_free_request(ep, req); + return ERR_PTR(-ENOMEM); + } + + req->length = len; + + return req; +} + +void frmnet_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags) +{ + struct rmnet_ctrl_pkt *pkt; + + pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags); + if (!pkt) + return ERR_PTR(-ENOMEM); + + pkt->buf = kmalloc(len, flags); + if (!pkt->buf) { + kfree(pkt); + return ERR_PTR(-ENOMEM); + } + pkt->len = len; + + return pkt; +} + +static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt) +{ + kfree(pkt->buf); + kfree(pkt); +} + +/* -------------------------------------------*/ + +static int rmnet_gport_setup(void) +{ + int ret; + int port_idx; + int i; + + pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u" + " smd ports: %u ctrl hsic ports: %u" + " nr_rmnet_ports: %u\n", + __func__, no_data_bam_ports, no_data_bam2bam_ports, + no_data_hsic_ports, no_ctrl_smd_ports, + no_ctrl_hsic_ports, nr_rmnet_ports); + + if (no_data_bam_ports || no_data_bam2bam_ports) { + ret = gbam_setup(no_data_bam_ports, + no_data_bam2bam_ports); + if (ret) + return ret; + } + + if (no_ctrl_smd_ports) { + ret = gsmd_ctrl_setup(no_ctrl_smd_ports); + if (ret) + return ret; + } + + if (no_data_hsic_ports) { + port_idx = ghsic_data_setup(no_data_hsic_ports, + USB_GADGET_RMNET); + if (port_idx < 0) + return port_idx; + for (i = 0; i < nr_rmnet_ports; i++) { + if (rmnet_ports[i].data_xport == + USB_GADGET_XPORT_HSIC) { + rmnet_ports[i].data_xport_num = port_idx; + port_idx++; + } + } + } + + if (no_ctrl_hsic_ports) { + port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports, + USB_GADGET_RMNET); + if (port_idx < 0) + return port_idx; + for (i = 0; i < nr_rmnet_ports; i++) { + if (rmnet_ports[i].ctrl_xport == + USB_GADGET_XPORT_HSIC) { + rmnet_ports[i].ctrl_xport_num = port_idx; + port_idx++; + } + } + } + + return 0; +} + +static int gport_rmnet_connect(struct f_rmnet *dev) +{ + int ret; + 
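+	/*
+	 * Connection sketch (mirrors the code below, not an extra code path):
+	 * the control transport is brought up first, then the data transport,
+	 * and the already-connected control side is unwound if the data side
+	 * fails, roughly:
+	 *
+	 *	ret = gsmd_ctrl_connect(&dev->port, port_num);
+	 *	...
+	 *	ret = gbam_connect(&dev->port, port_num, dxport, 0);
+	 *	if (ret)
+	 *		gsmd_ctrl_disconnect(&dev->port, port_num);
+	 */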
unsigned port_num; + enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; + enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; + + pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", + __func__, xport_to_str(cxport), xport_to_str(dxport), + dev, dev->port_num); + + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + switch (cxport) { + case USB_GADGET_XPORT_SMD: + ret = gsmd_ctrl_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: gsmd_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_ctrl_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: ghsic_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(cxport)); + return -ENODEV; + } + + port_num = rmnet_ports[dev->port_num].data_xport_num; + switch (dxport) { + case USB_GADGET_XPORT_BAM: + case USB_GADGET_XPORT_BAM2BAM: + /* currently only one connection (idx 0) + is supported */ + ret = gbam_connect(&dev->port, port_num, + dxport, 0); + if (ret) { + pr_err("%s: gbam_connect failed: err:%d\n", + __func__, ret); + gsmd_ctrl_disconnect(&dev->port, port_num); + return ret; + } + break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_data_connect(&dev->port, port_num); + if (ret) { + pr_err("%s: ghsic_data_connect failed: err:%d\n", + __func__, ret); + ghsic_ctrl_disconnect(&dev->port, port_num); + return ret; + } + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(dxport)); + return -ENODEV; + } + + return 0; +} + +static int gport_rmnet_disconnect(struct f_rmnet *dev) +{ + unsigned port_num; + enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport; + enum transport_type dxport = rmnet_ports[dev->port_num].data_xport; + + pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n", + __func__, xport_to_str(cxport), xport_to_str(dxport), + dev, dev->port_num); + + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + switch (cxport) { + case USB_GADGET_XPORT_SMD: + gsmd_ctrl_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_HSIC: + ghsic_ctrl_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(cxport)); + return -ENODEV; + } + + port_num = rmnet_ports[dev->port_num].data_xport_num; + switch (dxport) { + case USB_GADGET_XPORT_BAM: + case USB_GADGET_XPORT_BAM2BAM: + gbam_disconnect(&dev->port, port_num, dxport); + break; + case USB_GADGET_XPORT_HSIC: + ghsic_data_disconnect(&dev->port, port_num); + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %s\n", __func__, + xport_to_str(dxport)); + return -ENODEV; + } + + return 0; +} + +static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + + pr_debug("%s: portno:%d\n", __func__, dev->port_num); + + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + usb_free_descriptors(f->descriptors); + + frmnet_free_req(dev->notify, dev->notify_req); + + kfree(f->name); +} + +static void frmnet_disable(struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + unsigned long flags; + struct rmnet_ctrl_pkt *cpkt; + + pr_debug("%s: port#%d\n", __func__, dev->port_num); + 
+ usb_ep_disable(dev->notify); + + atomic_set(&dev->online, 0); + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->cpkt_resp_q)) { + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + + list_del(&cpkt->list); + rmnet_free_ctrl_pkt(cpkt); + } + atomic_set(&dev->notify_count, 0); + spin_unlock_irqrestore(&dev->lock, flags); + + gport_rmnet_disconnect(dev); +} + +static int +frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_composite_dev *cdev = dev->cdev; + int ret; + + pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (dev->notify->driver_data) { + pr_debug("%s: reset port:%d\n", __func__, dev->port_num); + usb_ep_disable(dev->notify); + } + dev->notify_desc = ep_choose(cdev->gadget, + dev->hs.notify, + dev->fs.notify); + ret = usb_ep_enable(dev->notify, dev->notify_desc); + if (ret) { + pr_err("%s: usb ep#%s enable failed, err#%d\n", + __func__, dev->notify->name, ret); + return ret; + } + dev->notify->driver_data = dev; + + if (dev->port.in->driver_data) { + pr_debug("%s: reset port:%d\n", __func__, dev->port_num); + gport_rmnet_disconnect(dev); + } + + dev->port.in_desc = ep_choose(cdev->gadget, + dev->hs.in, dev->fs.in); + dev->port.out_desc = ep_choose(cdev->gadget, + dev->hs.out, dev->fs.out); + + ret = gport_rmnet_connect(dev); + + atomic_set(&dev->online, 1); + + return ret; +} + +static void frmnet_ctrl_response_available(struct f_rmnet *dev) +{ + struct usb_request *req = dev->notify_req; + struct usb_cdc_notification *event; + unsigned long flags; + int ret; + + pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num); + + spin_lock_irqsave(&dev->lock, flags); + if (!atomic_read(&dev->online) || !req || !req->buf) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + if (atomic_inc_return(&dev->notify_count) != 1) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + event = req->buf; + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); + if (ret) { + atomic_dec(&dev->notify_count); + pr_debug("ep enqueue error %d\n", ret); + } +} + +static void frmnet_connect(struct grmnet *gr) +{ + struct f_rmnet *dev; + + if (!gr) { + pr_err("%s: Invalid grmnet:%p\n", __func__, gr); + return; + } + + dev = port_to_rmnet(gr); + + atomic_set(&dev->ctrl_online, 1); +} + +static void frmnet_disconnect(struct grmnet *gr) +{ + struct f_rmnet *dev; + unsigned long flags; + struct usb_cdc_notification *event; + int status; + struct rmnet_ctrl_pkt *cpkt; + + if (!gr) { + pr_err("%s: Invalid grmnet:%p\n", __func__, gr); + return; + } + + dev = port_to_rmnet(gr); + + atomic_set(&dev->ctrl_online, 0); + + if (!atomic_read(&dev->online)) { + pr_debug("%s: nothing to do\n", __func__); + return; + } + + usb_ep_fifo_flush(dev->notify); + + event = dev->notify_req->buf; + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + + status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + if 
(!atomic_read(&dev->online)) + return; + pr_err("%s: rmnet notify ep enqueue error %d\n", + __func__, status); + } + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->cpkt_resp_q)) { + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + + list_del(&cpkt->list); + rmnet_free_ctrl_pkt(cpkt); + } + atomic_set(&dev->notify_count, 0); + spin_unlock_irqrestore(&dev->lock, flags); + +} + +static int +frmnet_send_cpkt_response(void *gr, void *buf, size_t len) +{ + struct f_rmnet *dev; + struct rmnet_ctrl_pkt *cpkt; + unsigned long flags; + + if (!gr || !buf) { + pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n", + __func__, gr, buf); + return -ENODEV; + } + cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC); + if (IS_ERR(cpkt)) { + pr_err("%s: Unable to allocate ctrl pkt\n", __func__); + return -ENOMEM; + } + memcpy(cpkt->buf, buf, len); + cpkt->len = len; + + dev = port_to_rmnet(gr); + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) { + rmnet_free_ctrl_pkt(cpkt); + return 0; + } + + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&cpkt->list, &dev->cpkt_resp_q); + spin_unlock_irqrestore(&dev->lock, flags); + + frmnet_ctrl_response_available(dev); + + return 0; +} + +static void +frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rmnet *dev = req->context; + struct usb_composite_dev *cdev; + unsigned port_num; + + if (!dev) { + pr_err("%s: rmnet dev is null\n", __func__); + return; + } + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + cdev = dev->cdev; + + if (dev->port.send_encap_cmd) { + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + dev->port.send_encap_cmd(port_num, req->buf, req->actual); + } +} + +static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rmnet *dev = req->context; + int status = req->status; + + pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num); + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + pr_err("rmnet notify ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + if (!atomic_read(&dev->ctrl_online)) + break; + + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->notify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + pr_debug("ep enqueue error %d\n", status); + } + break; + } +} + +static int +frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = cdev->req; + unsigned port_num; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + int ret = -EOPNOTSUPP; + + pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num); + + if (!atomic_read(&dev->online)) { + pr_debug("%s: usb cable is not connected\n", __func__); + return -ENOTCONN; + } + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = frmnet_cmd_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + unsigned len; + struct rmnet_ctrl_pkt 
*cpkt; + + spin_lock(&dev->lock); + if (list_empty(&dev->cpkt_resp_q)) { + pr_err("ctrl resp queue empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + + cpkt = list_first_entry(&dev->cpkt_resp_q, + struct rmnet_ctrl_pkt, list); + list_del(&cpkt->list); + spin_unlock(&dev->lock); + + len = min_t(unsigned, w_length, cpkt->len); + memcpy(req->buf, cpkt->buf, len); + ret = len; + + rmnet_free_ctrl_pkt(cpkt); + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + if (dev->port.notify_modem) { + port_num = rmnet_ports[dev->port_num].ctrl_xport_num; + dev->port.notify_modem(&dev->port, port_num, w_value); + } + ret = 0; + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = (ret < w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static int frmnet_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_rmnet *dev = func_to_rmnet(f); + struct usb_ep *ep; + struct usb_composite_dev *cdev = c->cdev; + int ret = -ENODEV; + + dev->ifc_id = usb_interface_id(c, f); + if (dev->ifc_id < 0) { + pr_err("%s: unable to allocate ifc id, err:%d", + __func__, dev->ifc_id); + return dev->ifc_id; + } + rmnet_interface_desc.bInterfaceNumber = dev->ifc_id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc); + if (!ep) { + pr_err("%s: usb epin autoconfig failed\n", __func__); + return -ENODEV; + } + dev->port.in = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc); + if (!ep) { + pr_err("%s: usb epout autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_out_fail; + } + dev->port.out = ep; + ep->driver_data = cdev; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc); + if (!ep) { + pr_err("%s: usb epnotify autoconfig failed\n", __func__); + ret = -ENODEV; + goto ep_auto_notify_fail; + } + dev->notify = ep; + ep->driver_data = cdev; + + dev->notify_req = frmnet_alloc_req(ep, + sizeof(struct usb_cdc_notification), + GFP_KERNEL); + if (IS_ERR(dev->notify_req)) { + pr_err("%s: unable to allocate memory for notify req\n", + __func__); + ret = -ENOMEM; + goto ep_notify_alloc_fail; + } + + dev->notify_req->complete = frmnet_notify_complete; + dev->notify_req->context = dev; + + f->descriptors = usb_copy_descriptors(rmnet_fs_function); + + if (!f->descriptors) + goto fail; + + dev->fs.in = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_in_desc); + dev->fs.out = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_out_desc); + dev->fs.notify = usb_find_endpoint(rmnet_fs_function, + f->descriptors, + &rmnet_fs_notify_desc); + + if (gadget_is_dualspeed(cdev->gadget)) { + rmnet_hs_in_desc.bEndpointAddress = + rmnet_fs_in_desc.bEndpointAddress; + rmnet_hs_out_desc.bEndpointAddress = + rmnet_fs_out_desc.bEndpointAddress; + rmnet_hs_notify_desc.bEndpointAddress = + rmnet_fs_notify_desc.bEndpointAddress; + + /* copy descriptors, and track endpoint copies */ + f->hs_descriptors = 
usb_copy_descriptors(rmnet_hs_function); + + if (!f->hs_descriptors) + goto fail; + + dev->hs.in = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_in_desc); + dev->hs.out = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_out_desc); + dev->hs.notify = usb_find_endpoint(rmnet_hs_function, + f->hs_descriptors, &rmnet_hs_notify_desc); + } + + pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n", + __func__, dev->port_num, + gadget_is_dualspeed(cdev->gadget) ? "dual" : "full", + dev->port.in->name, dev->port.out->name); + + return 0; + +fail: + if (f->descriptors) + usb_free_descriptors(f->descriptors); +ep_notify_alloc_fail: + dev->notify->driver_data = NULL; + dev->notify = NULL; +ep_auto_notify_fail: + dev->port.out->driver_data = NULL; + dev->port.out = NULL; +ep_auto_out_fail: + dev->port.in->driver_data = NULL; + dev->port.in = NULL; + + return ret; +} + +static int frmnet_bind_config(struct usb_configuration *c, unsigned portno) +{ + int status; + struct f_rmnet *dev; + struct usb_function *f; + unsigned long flags; + + pr_debug("%s: usb config:%p\n", __func__, c); + + if (portno >= nr_rmnet_ports) { + pr_err("%s: supporting ports#%u port_id:%u", __func__, + nr_rmnet_ports, portno); + return -ENODEV; + } + + if (rmnet_string_defs[0].id == 0) { + status = usb_string_id(c->cdev); + if (status < 0) { + pr_err("%s: failed to get string id, err:%d\n", + __func__, status); + return status; + } + rmnet_string_defs[0].id = status; + } + + dev = rmnet_ports[portno].port; + + spin_lock_irqsave(&dev->lock, flags); + dev->cdev = c->cdev; + f = &dev->port.func; + f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno); + spin_unlock_irqrestore(&dev->lock, flags); + if (!f->name) { + pr_err("%s: cannot allocate memory for name\n", __func__); + return -ENOMEM; + } + + f->strings = rmnet_strings; + f->bind = frmnet_bind; + f->unbind = frmnet_unbind; + f->disable = frmnet_disable; + f->set_alt = frmnet_set_alt; + f->setup = frmnet_setup; + dev->port.send_cpkt_response = frmnet_send_cpkt_response; + dev->port.disconnect = frmnet_disconnect; + dev->port.connect = frmnet_connect; + + status = usb_add_function(c, f); + if (status) { + pr_err("%s: usb add function failed: %d\n", + __func__, status); + kfree(f->name); + return status; + } + + pr_debug("%s: complete\n", __func__); + + return status; +} + +static void frmnet_cleanup(void) +{ + int i; + + for (i = 0; i < nr_rmnet_ports; i++) + kfree(rmnet_ports[i].port); + + nr_rmnet_ports = 0; + no_ctrl_smd_ports = 0; + no_data_bam_ports = 0; + no_data_bam2bam_ports = 0; + no_ctrl_hsic_ports = 0; + no_data_hsic_ports = 0; +} + +static int frmnet_init_port(const char *ctrl_name, const char *data_name) +{ + struct f_rmnet *dev; + struct rmnet_ports *rmnet_port; + int ret; + int i; + + if (nr_rmnet_ports >= NR_RMNET_PORTS) { + pr_err("%s: Max-%d instances supported\n", + __func__, NR_RMNET_PORTS); + return -EINVAL; + } + + pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n", + __func__, nr_rmnet_ports, ctrl_name, data_name); + + dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL); + if (!dev) { + pr_err("%s: Unable to allocate rmnet device\n", __func__); + return -ENOMEM; + } + + dev->port_num = nr_rmnet_ports; + spin_lock_init(&dev->lock); + INIT_LIST_HEAD(&dev->cpkt_resp_q); + + rmnet_port = &rmnet_ports[nr_rmnet_ports]; + rmnet_port->port = dev; + rmnet_port->port_num = nr_rmnet_ports; + rmnet_port->ctrl_xport = str_to_xport(ctrl_name); + rmnet_port->data_xport = str_to_xport(data_name); + + switch (rmnet_port->ctrl_xport) { + 
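+	/*
+	 * Each control transport type keeps its own running counter, so a
+	 * port's ctrl_xport_num is simply how many ports of that type were
+	 * registered before it; e.g. a hypothetical second SMD control port
+	 * would be assigned ctrl_xport_num = 1 here.
+	 */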
case USB_GADGET_XPORT_SMD: + rmnet_port->ctrl_xport_num = no_ctrl_smd_ports; + no_ctrl_smd_ports++; + break; + case USB_GADGET_XPORT_HSIC: + rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports; + no_ctrl_hsic_ports++; + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %u\n", __func__, + rmnet_port->ctrl_xport); + ret = -ENODEV; + goto fail_probe; + } + + switch (rmnet_port->data_xport) { + case USB_GADGET_XPORT_BAM: + rmnet_port->data_xport_num = no_data_bam_ports; + no_data_bam_ports++; + break; + case USB_GADGET_XPORT_BAM2BAM: + rmnet_port->data_xport_num = no_data_bam2bam_ports; + no_data_bam2bam_ports++; + break; + case USB_GADGET_XPORT_HSIC: + rmnet_port->data_xport_num = no_data_hsic_ports; + no_data_hsic_ports++; + break; + case USB_GADGET_XPORT_NONE: + break; + default: + pr_err("%s: Un-supported transport: %u\n", __func__, + rmnet_port->data_xport); + ret = -ENODEV; + goto fail_probe; + } + nr_rmnet_ports++; + + return 0; + +fail_probe: + for (i = 0; i < nr_rmnet_ports; i++) + kfree(rmnet_ports[i].port); + + nr_rmnet_ports = 0; + no_ctrl_smd_ports = 0; + no_data_bam_ports = 0; + no_ctrl_hsic_ports = 0; + no_data_hsic_ports = 0; + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet.h b/drivers/usb/gadget/f_rmnet.h new file mode 100644 index 00000000000..2d816c653f1 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet.h @@ -0,0 +1,19 @@ +/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __F_RMNET_H +#define __F_RMNET_H + +int rmnet_function_add(struct usb_configuration *c); + +#endif /* __F_RMNET_H */ diff --git a/drivers/usb/gadget/f_rmnet_sdio.c b/drivers/usb/gadget/f_rmnet_sdio.c new file mode 100644 index 00000000000..f63d9396a16 --- /dev/null +++ b/drivers/usb/gadget/f_rmnet_sdio.c @@ -0,0 +1,1535 @@ +/* + * f_rmnet_sdio.c -- RmNet SDIO function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 Nokia Corporation + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#ifdef CONFIG_RMNET_SDIO_CTL_CHANNEL +static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL; +#else +static uint32_t rmnet_sdio_ctl_ch; +#endif +module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO); +MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID"); + +#ifdef CONFIG_RMNET_SDIO_DATA_CHANNEL +static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL; +#else +static uint32_t rmnet_sdio_data_ch; +#endif +module_param(rmnet_sdio_data_ch, uint, S_IRUGO); +MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID"); + +#define ACM_CTRL_DTR (1 << 0) + +#define SDIO_MUX_HDR 8 +#define RMNET_SDIO_NOTIFY_INTERVAL 5 +#define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification) + +#define RMNET_SDIO_RX_REQ_MAX 16 +#define RMNET_SDIO_RX_REQ_SIZE 2048 +#define RMNET_SDIO_TX_REQ_MAX 200 + +#define TX_PKT_DROP_THRESHOLD 1000 +#define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000 +#define RX_PKT_FLOW_CTRL_DISABLE 500 + +unsigned int sdio_tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD; +module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int sdio_rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD; +module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int sdio_rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE; +module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +/* QMI requests & responses buffer*/ +struct rmnet_sdio_qmi_buf { + void *buf; + int len; + struct list_head list; +}; + +struct rmnet_sdio_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + + struct usb_ep *epout; + struct usb_ep *epin; + struct usb_ep *epnotify; + struct usb_request *notify_req; + + u8 ifc_id; + /* QMI lists */ + struct list_head qmi_req_q; + unsigned int qreq_q_len; + struct list_head qmi_resp_q; + unsigned int qresp_q_len; + /* Tx/Rx lists */ + struct list_head tx_idle; + unsigned int tx_idle_len; + struct sk_buff_head tx_skb_queue; + struct list_head rx_idle; + unsigned int rx_idle_len; + struct sk_buff_head rx_skb_queue; + + spinlock_t lock; + atomic_t online; + atomic_t notify_count; + + struct workqueue_struct *wq; + struct work_struct disconnect_work; + + struct work_struct ctl_rx_work; + struct work_struct data_rx_work; + + struct delayed_work sdio_open_work; + struct work_struct sdio_close_work; +#define RMNET_SDIO_CH_OPEN 1 + unsigned long data_ch_status; + unsigned long ctrl_ch_status; + + unsigned int dpkts_pending_atdmux; + int cbits_to_modem; + struct work_struct set_modem_ctl_bits_work; + + /* pkt logging dpkt - data pkt; cpkt - control pkt*/ + struct dentry *dent; + unsigned long dpkt_tolaptop; + unsigned long dpkt_tomodem; + unsigned long tx_drp_cnt; + unsigned long cpkt_tolaptop; + unsigned long cpkt_tomodem; +}; + +static struct usb_interface_descriptor rmnet_sdio_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct 
usb_endpoint_descriptor rmnet_sdio_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), + .bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_sdio_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_sdio_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_sdio_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_sdio_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_sdio_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), + .bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_sdio_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_sdio_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_sdio_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_sdio_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_sdio_string_defs[] = { + [0].s = "QMI RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_sdio_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_sdio_string_defs, +}; + +static struct usb_gadget_strings *rmnet_sdio_strings[] = { + &rmnet_sdio_string_table, + NULL, +}; + +static struct rmnet_sdio_qmi_buf * +rmnet_sdio_alloc_qmi(unsigned len, gfp_t kmalloc_flags) + +{ + struct rmnet_sdio_qmi_buf *qmi; + + qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags); + if (qmi != NULL) { + qmi->buf = kmalloc(len, kmalloc_flags); + if (qmi->buf == NULL) { + kfree(qmi); + qmi = NULL; + } + } + + return qmi ? qmi : ERR_PTR(-ENOMEM); +} + +static void rmnet_sdio_free_qmi(struct rmnet_sdio_qmi_buf *qmi) +{ + kfree(qmi->buf); + kfree(qmi); +} +/* + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or a pointer with an error code if there is an error. 
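+ *
+ * A typical caller (see rmnet_sdio_start_io) checks the ERR_PTR-style
+ * return, roughly:
+ *
+ *	req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC);
+ *	if (IS_ERR(req))
+ *		return PTR_ERR(req);
+ *	req->complete = rmnet_sdio_complete_epout;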
+ */ +static struct usb_request * +rmnet_sdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (len && req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +/* + * Free a usb_request and its buffer. + */ +static void rmnet_sdio_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void rmnet_sdio_notify_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + ERROR(cdev, "rmnet notifyep error %d\n", status); + /* FALLTHROUGH */ + case 0: + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + /* handle multiple pending QMI_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enq error %d\n", status); + } + break; + } +} + +static void rmnet_sdio_qmi_resp_available(struct rmnet_sdio_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_cdc_notification *event; + int status; + unsigned long flags; + + /* Response will be sent later */ + if (atomic_inc_return(&dev->notify_count) != 1) + return; + + spin_lock_irqsave(&dev->lock, flags); + + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + event = dev->notify_req->buf; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + if (atomic_read(&dev->online)) + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); + } +} + +#define SDIO_MAX_CTRL_PKT_SIZE 4096 +static void rmnet_sdio_ctl_receive_cb(void *data, int size, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_resp; + unsigned long flags; + + if (!data) { + pr_info("%s: cmux_ch close event\n", __func__); + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + queue_work(dev->wq, &dev->sdio_close_work); + } + return; + } + + if (!size || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + + if (size > SDIO_MAX_CTRL_PKT_SIZE) { + ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n", + size, SDIO_MAX_CTRL_PKT_SIZE); + return; + } + + if (!atomic_read(&dev->online)) { + DBG(cdev, "USB disconnected\n"); + return; + } + + qmi_resp = rmnet_sdio_alloc_qmi(size, GFP_KERNEL); + if (IS_ERR(qmi_resp)) { + DBG(cdev, "unable to allocate memory for QMI resp\n"); + return; + } + memcpy(qmi_resp->buf, 
data, size); + qmi_resp->len = size; + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); + dev->qresp_q_len++; + spin_unlock_irqrestore(&dev->lock, flags); + + rmnet_sdio_qmi_resp_available(dev); +} + +static void rmnet_sdio_ctl_write_done(void *data, int size, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + + VDBG(cdev, "rmnet control write done = %d bytes\n", size); +} + +static void rmnet_sdio_sts_callback(int id, void *priv) +{ + struct rmnet_sdio_dev *dev = priv; + struct usb_composite_dev *cdev = dev->cdev; + + DBG(cdev, "rmnet_sdio_sts_callback: id: %d\n", id); +} + +static void rmnet_sdio_control_rx_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, + ctl_rx_work); + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_req; + unsigned long flags; + int ret; + + while (1) { + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->qmi_req_q)) + goto unlock; + + qmi_req = list_first_entry(&dev->qmi_req_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi_req->list); + dev->qreq_q_len--; + spin_unlock_irqrestore(&dev->lock, flags); + + ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf, + qmi_req->len); + if (ret != qmi_req->len) { + ERROR(cdev, "rmnet control SDIO write failed\n"); + return; + } + + dev->cpkt_tomodem++; + + /* + * cmux_write API copies the buffer and gives it to sdio_al. + * Hence freeing the memory before write is completed. + */ + rmnet_sdio_free_qmi(qmi_req); + } +unlock: + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_response_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + + switch (req->status) { + case -ECONNRESET: + case -ESHUTDOWN: + case 0: + return; + default: + INFO(cdev, "rmnet %s response error %d, %d/%d\n", + ep->name, req->status, + req->actual, req->length); + } +} + +static void rmnet_sdio_command_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + struct rmnet_sdio_qmi_buf *qmi_req; + int len = req->actual; + + if (req->status < 0) { + ERROR(cdev, "rmnet command error %d\n", req->status); + return; + } + + /* discard the packet if sdio is not available */ + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + qmi_req = rmnet_sdio_alloc_qmi(len, GFP_ATOMIC); + if (IS_ERR(qmi_req)) { + ERROR(cdev, "unable to allocate memory for QMI req\n"); + return; + } + memcpy(qmi_req->buf, req->buf, len); + qmi_req->len = len; + spin_lock(&dev->lock); + list_add_tail(&qmi_req->list, &dev->qmi_req_q); + dev->qreq_q_len++; + spin_unlock(&dev->lock); + queue_work(dev->wq, &dev->ctl_rx_work); +} + +static int +rmnet_sdio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + struct rmnet_sdio_qmi_buf *resp; + + if (!atomic_read(&dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | 
USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = rmnet_sdio_command_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + unsigned len; + + spin_lock(&dev->lock); + + if (list_empty(&dev->qmi_resp_q)) { + INFO(cdev, "qmi resp empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + + resp = list_first_entry(&dev->qmi_resp_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&resp->list); + dev->qresp_q_len--; + spin_unlock(&dev->lock); + + len = min_t(unsigned, w_length, resp->len); + memcpy(req->buf, resp->buf, len); + ret = len; + req->context = dev; + req->complete = rmnet_sdio_response_complete; + rmnet_sdio_free_qmi(resp); + + /* check if its the right place to add */ + dev->cpkt_tolaptop++; + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + /* This is a workaround for RmNet and is borrowed from the + * CDC/ACM standard. The host driver will issue the above ACM + * standard request to the RmNet interface in the following + * scenario: Once the network adapter is disabled from device + * manager, the above request will be sent from the qcusbnet + * host driver, with DTR being '0'. Once network adapter is + * enabled from device manager (or during enumeration), the + * request will be sent with DTR being '1'. + */ + if (w_value & ACM_CTRL_DTR) + dev->cbits_to_modem |= TIOCM_DTR; + else + dev->cbits_to_modem &= ~TIOCM_DTR; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); + + ret = 0; + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
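+	   req->zero = (ret < w_length) asks the gadget core to append a
+	   zero-length packet when a short reply happens to be an exact
+	   multiple of ep0's maxpacket size, so the host still detects the
+	   early end of the data stage.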
*/ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = (ret < w_length); + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static int +rmnet_sdio_rx_submit(struct rmnet_sdio_dev *dev, struct usb_request *req, + gfp_t gfp_flags) +{ + struct sk_buff *skb; + int retval; + + skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags); + if (skb == NULL) + return -ENOMEM; + skb_reserve(skb, SDIO_MUX_HDR); + + req->buf = skb->data; + req->length = RMNET_SDIO_RX_REQ_SIZE; + req->context = skb; + + retval = usb_ep_queue(dev->epout, req, gfp_flags); + if (retval) + dev_kfree_skb_any(skb); + + return retval; +} + +static void rmnet_sdio_start_rx(struct rmnet_sdio_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + int status; + struct usb_request *req; + unsigned long flags; + + if (!atomic_read(&dev->online)) { + pr_err("%s: USB not connected\n", __func__); + return; + } + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->rx_idle)) { + req = list_first_entry(&dev->rx_idle, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + + spin_unlock_irqrestore(&dev->lock, flags); + status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); + spin_lock_irqsave(&dev->lock, flags); + + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + break; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_start_tx(struct rmnet_sdio_dev *dev) +{ + unsigned long flags; + int status; + struct sk_buff *skb; + struct usb_request *req; + struct usb_composite_dev *cdev = dev->cdev; + + if (!atomic_read(&dev->online)) + return; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) + return; + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(&dev->tx_idle)) { + skb = __skb_dequeue(&dev->tx_skb_queue); + if (!skb) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + req->context = skb; + req->buf = skb->data; + req->length = skb->len; + + list_del(&req->list); + dev->tx_idle_len--; + spin_unlock(&dev->lock); + status = usb_ep_queue(dev->epin, req, GFP_ATOMIC); + spin_lock(&dev->lock); + if (status) { + /* USB still online, queue requests back */ + if (atomic_read(&dev->online)) { + ERROR(cdev, "rmnet tx data enqueue err %d\n", + status); + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + __skb_queue_head(&dev->tx_skb_queue, skb); + } else { + req->buf = 0; + rmnet_sdio_free_req(dev->epin, req); + dev_kfree_skb_any(skb); + } + break; + } + dev->dpkt_tolaptop++; + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb) +{ + struct rmnet_sdio_dev *dev = priv; + unsigned long flags; + + /* SDIO mux sends NULL SKB when link state changes */ + if (!skb) { + pr_info("%s: dmux_ch close event\n", __func__); + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + queue_work(dev->wq, &dev->sdio_close_work); + } + return; + } + + if (!atomic_read(&dev->online)) { + dev_kfree_skb_any(skb); + return; + } + + 
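+	/*
+	 * Downlink flow control: SKBs from the SDIO mux are staged on
+	 * tx_skb_queue and drained by rmnet_sdio_start_tx(); once the
+	 * backlog exceeds sdio_tx_pkt_drop_thld (1000 by default) new
+	 * packets are dropped and counted in tx_drp_cnt instead of
+	 * growing the queue without bound.
+	 */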
spin_lock_irqsave(&dev->lock, flags); + + if (dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) { + if (printk_ratelimit()) + pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n", + __func__, dev->tx_drp_cnt); + dev->tx_drp_cnt++; + spin_unlock_irqrestore(&dev->lock, flags); + dev_kfree_skb_any(skb); + return; + } + + __skb_queue_tail(&dev->tx_skb_queue, skb); + spin_unlock_irqrestore(&dev->lock, flags); + + rmnet_sdio_start_tx(dev); +} + +static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb) +{ + struct rmnet_sdio_dev *dev = priv; + + /* SDIO mux sends NULL SKB when link state changes */ + if (!skb) { + pr_info("%s: dmux_ch open event\n", __func__); + queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); + return; + } + + dev_kfree_skb_any(skb); + /* this function is called from + * sdio mux from spin_lock_irqsave + */ + spin_lock(&dev->lock); + dev->dpkts_pending_atdmux--; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || + dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) { + spin_unlock(&dev->lock); + return; + } + spin_unlock(&dev->lock); + + rmnet_sdio_start_rx(dev); +} + +static void rmnet_sdio_data_rx_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, + data_rx_work); + struct usb_composite_dev *cdev = dev->cdev; + struct sk_buff *skb; + int ret; + unsigned long flags; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + pr_info("%s: sdio data ch not open\n", __func__); + return; + } + + spin_lock_irqsave(&dev->lock, flags); + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) { + spin_unlock_irqrestore(&dev->lock, flags); + ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb); + spin_lock_irqsave(&dev->lock, flags); + if (ret < 0) { + ERROR(cdev, "rmnet SDIO data write failed\n"); + dev_kfree_skb_any(skb); + break; + } else { + dev->dpkt_tomodem++; + dev->dpkts_pending_atdmux++; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_complete_epout(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = ep->driver_data; + struct usb_composite_dev *cdev = dev->cdev; + struct sk_buff *skb = req->context; + int status = req->status; + int queue = 0; + + switch (status) { + case 0: + /* successful completion */ + skb_put(skb, req->actual); + queue = 1; + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + dev_kfree_skb_any(skb); + req->buf = 0; + rmnet_sdio_free_req(ep, req); + return; + default: + /* unexpected failure */ + ERROR(cdev, "RMNET %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + dev_kfree_skb_any(skb); + break; + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + pr_info("%s: sdio data ch not open\n", __func__); + dev_kfree_skb_any(skb); + req->buf = 0; + rmnet_sdio_free_req(ep, req); + return; + } + + spin_lock(&dev->lock); + if (queue) { + __skb_queue_tail(&dev->rx_skb_queue, skb); + queue_work(dev->wq, &dev->data_rx_work); + } + + if (dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) { + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + spin_unlock(&dev->lock); + return; + } + spin_unlock(&dev->lock); + + status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + } +} + +static void rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_sdio_dev *dev = ep->driver_data; + struct sk_buff 
*skb = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case 0: + /* successful completion */ + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + break; + default: + ERROR(cdev, "rmnet data tx ep error %d\n", status); + break; + } + + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + spin_unlock(&dev->lock); + dev_kfree_skb_any(skb); + + rmnet_sdio_start_tx(dev); +} + +static void rmnet_sdio_free_buf(struct rmnet_sdio_dev *dev) +{ + struct rmnet_sdio_qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + struct sk_buff *skb; + unsigned long flags; + + + spin_lock_irqsave(&dev->lock, flags); + + dev->dpkt_tolaptop = 0; + dev->dpkt_tomodem = 0; + dev->cpkt_tolaptop = 0; + dev->cpkt_tomodem = 0; + dev->dpkts_pending_atdmux = 0; + dev->tx_drp_cnt = 0; + + /* free all usb requests in tx pool */ + list_for_each_safe(act, tmp, &dev->tx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + dev->tx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + list_for_each_safe(act, tmp, &dev->rx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_req_q) { + qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qreq_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_resp_q) { + qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qresp_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + while ((skb = __skb_dequeue(&dev->tx_skb_queue))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) + dev_kfree_skb_any(skb); + + rmnet_sdio_free_req(dev->epnotify, dev->notify_req); + + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_sdio_set_modem_cbits_w(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev; + + dev = container_of(w, struct rmnet_sdio_dev, set_modem_ctl_bits_work); + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + return; + + pr_debug("%s: cbits_to_modem:%d\n", + __func__, dev->cbits_to_modem); + + sdio_cmux_tiocmset(rmnet_sdio_ctl_ch, + dev->cbits_to_modem, + ~dev->cbits_to_modem); +} + +static void rmnet_sdio_disconnect_work(struct work_struct *w) +{ + /* REVISIT: Push all the data to sdio if anythign is pending */ +} +static void rmnet_sdio_suspend(struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + if (!atomic_read(&dev->online)) + return; + /* This is a workaround for Windows Host bug during suspend. + * Windows 7/xp Hosts are suppose to drop DTR, when Host suspended. + * Since it is not beind done, Hence exclusively dropping the DTR + * from function driver suspend. 
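+ * Clearing TIOCM_DTR here and queueing set_modem_ctl_bits_work pushes
+ * the dropped DTR to the modem via sdio_cmux_tiocmset(), the same path
+ * taken when the host genuinely toggles the control line through
+ * USB_CDC_REQ_SET_CONTROL_LINE_STATE.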
+ */ + dev->cbits_to_modem &= ~TIOCM_DTR; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); +} +static void rmnet_sdio_disable(struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + if (!atomic_read(&dev->online)) + return; + + usb_ep_disable(dev->epnotify); + usb_ep_disable(dev->epout); + usb_ep_disable(dev->epin); + + atomic_set(&dev->online, 0); + atomic_set(&dev->notify_count, 0); + rmnet_sdio_free_buf(dev); + + /* cleanup work */ + queue_work(dev->wq, &dev->disconnect_work); + dev->cbits_to_modem = 0; + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); +} + +static void rmnet_close_sdio_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev; + unsigned long flags; + struct usb_cdc_notification *event; + int status; + struct rmnet_sdio_qmi_buf *qmi; + struct usb_request *req; + struct sk_buff *skb; + + pr_debug("%s:\n", __func__); + + dev = container_of(w, struct rmnet_sdio_dev, sdio_close_work); + + if (!atomic_read(&dev->online)) + return; + + usb_ep_fifo_flush(dev->epnotify); + + spin_lock_irqsave(&dev->lock, flags); + event = dev->notify_req->buf; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + spin_unlock_irqrestore(&dev->lock, flags); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_KERNEL); + if (status < 0) { + if (!atomic_read(&dev->online)) + return; + pr_err("%s: rmnet notify ep enqueue error %d\n", + __func__, status); + } + + usb_ep_fifo_flush(dev->epout); + usb_ep_fifo_flush(dev->epin); + cancel_work_sync(&dev->data_rx_work); + + spin_lock_irqsave(&dev->lock, flags); + + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return; + } + + /* free all usb requests in tx pool */ + while (!list_empty(&dev->tx_idle)) { + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + list_del(&req->list); + dev->tx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + while (!list_empty(&dev->rx_idle)) { + req = list_first_entry(&dev->rx_idle, struct usb_request, list); + list_del(&req->list); + dev->rx_idle_len--; + req->buf = NULL; + rmnet_sdio_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + while (!list_empty(&dev->qmi_req_q)) { + qmi = list_first_entry(&dev->qmi_req_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qreq_q_len--; + rmnet_sdio_free_qmi(qmi); + } + + /* free all buffers in qmi response pool */ + while (!list_empty(&dev->qmi_resp_q)) { + qmi = list_first_entry(&dev->qmi_resp_q, + struct rmnet_sdio_qmi_buf, list); + list_del(&qmi->list); + dev->qresp_q_len--; + rmnet_sdio_free_qmi(qmi); + } + atomic_set(&dev->notify_count, 0); + + pr_info("%s: setting notify count to zero\n", __func__); + + + while ((skb = __skb_dequeue(&dev->tx_skb_queue))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&dev->rx_skb_queue))) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&dev->lock, flags); +} + +static int rmnet_sdio_start_io(struct rmnet_sdio_dev *dev) +{ + struct usb_request *req; + int ret, i; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (!atomic_read(&dev->online)) { + spin_unlock_irqrestore(&dev->lock, flags); + return 0; + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || + 
!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + spin_unlock_irqrestore(&dev->lock, flags); + return 0; + } + + for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) { + req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + spin_unlock_irqrestore(&dev->lock, flags); + goto free_buf; + } + req->complete = rmnet_sdio_complete_epout; + list_add_tail(&req->list, &dev->rx_idle); + dev->rx_idle_len++; + } + for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) { + req = rmnet_sdio_alloc_req(dev->epin, 0, GFP_ATOMIC); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + spin_unlock_irqrestore(&dev->lock, flags); + goto free_buf; + } + req->complete = rmnet_sdio_complete_epin; + list_add_tail(&req->list, &dev->tx_idle); + dev->tx_idle_len++; + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* Queue Rx data requests */ + rmnet_sdio_start_rx(dev); + + return 0; + +free_buf: + rmnet_sdio_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + return ret; +} + + +#define RMNET_SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000) +#define SDIO_SDIO_OPEN_MAX_RETRY 90 +static void rmnet_open_sdio_work(struct work_struct *w) +{ + struct rmnet_sdio_dev *dev = + container_of(w, struct rmnet_sdio_dev, + sdio_open_work.work); + struct usb_composite_dev *cdev = dev->cdev; + int ret; + static int retry_cnt; + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + /* Control channel for QMI messages */ + ret = sdio_cmux_open(rmnet_sdio_ctl_ch, + rmnet_sdio_ctl_receive_cb, + rmnet_sdio_ctl_write_done, + rmnet_sdio_sts_callback, dev); + if (!ret) + set_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + } + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + /* Data channel for network packets */ + ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev, + rmnet_sdio_data_receive_cb, + rmnet_sdio_data_write_done); + if (!ret) + set_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + } + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) && + test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + + rmnet_sdio_start_io(dev); + + /* if usb cable is connected, update DTR status to modem */ + if (atomic_read(&dev->online)) + queue_work(dev->wq, &dev->set_modem_ctl_bits_work); + + pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n", + __func__, retry_cnt); + retry_cnt = 0; + return; + } + + retry_cnt++; + pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n", + __func__, retry_cnt); + + if (retry_cnt > SDIO_SDIO_OPEN_MAX_RETRY) { + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) + ERROR(cdev, "Unable to open control SDIO channel\n"); + + if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) + ERROR(cdev, "Unable to open DATA SDIO channel\n"); + + } else { + queue_delayed_work(dev->wq, &dev->sdio_open_work, + RMNET_SDIO_OPEN_RETRY_DELAY); + } +} + +static int rmnet_sdio_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + dev->epin->driver_data = dev; + usb_ep_enable(dev->epin, ep_choose(cdev->gadget, + &rmnet_sdio_hs_in_desc, + &rmnet_sdio_fs_in_desc)); + dev->epout->driver_data = dev; + usb_ep_enable(dev->epout, ep_choose(cdev->gadget, + &rmnet_sdio_hs_out_desc, + &rmnet_sdio_fs_out_desc)); + usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget, + &rmnet_sdio_hs_notify_desc, + &rmnet_sdio_fs_notify_desc)); + + /* allocate notification */ + dev->notify_req = 
rmnet_sdio_alloc_req(dev->epnotify, + RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC); + + if (IS_ERR(dev->notify_req)) { + ret = PTR_ERR(dev->notify_req); + pr_err("%s: unable to allocate memory for notify ep\n", + __func__); + return ret; + } + dev->notify_req->complete = rmnet_sdio_notify_complete; + dev->notify_req->context = dev; + dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE; + + atomic_set(&dev->online, 1); + + ret = rmnet_sdio_start_io(dev); + + return ret; + +} + +static int rmnet_sdio_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + int id; + struct usb_ep *ep; + + dev->cdev = cdev; + + /* allocate interface ID */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + dev->ifc_id = id; + rmnet_sdio_interface_desc.bInterfaceNumber = id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_in_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epin = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_out_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epout = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_notify_desc); + if (!ep) + goto out; + ep->driver_data = cdev; /* claim endpoint */ + dev->epnotify = ep; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + rmnet_sdio_hs_in_desc.bEndpointAddress = + rmnet_sdio_fs_in_desc.bEndpointAddress; + rmnet_sdio_hs_out_desc.bEndpointAddress = + rmnet_sdio_fs_out_desc.bEndpointAddress; + rmnet_sdio_hs_notify_desc.bEndpointAddress = + rmnet_sdio_fs_notify_desc.bEndpointAddress; + } + + queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); + + return 0; + +out: + if (dev->epnotify) + dev->epnotify->driver_data = NULL; + if (dev->epout) + dev->epout->driver_data = NULL; + if (dev->epin) + dev->epin->driver_data = NULL; + + return -ENODEV; +} + +static void +rmnet_sdio_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, + function); + + cancel_delayed_work_sync(&dev->sdio_open_work); + destroy_workqueue(dev->wq); + + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { + msm_sdio_dmux_close(rmnet_sdio_data_ch); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); + } + + if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { + sdio_cmux_close(rmnet_sdio_ctl_ch); + clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); + } + + debugfs_remove_recursive(dev->dent); + + kfree(dev); +} + +#if defined(CONFIG_DEBUG_FS) +static ssize_t rmnet_sdio_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct rmnet_sdio_dev *dev = file->private_data; + char *buf; + unsigned long flags; + int ret; + + buf = kzalloc(sizeof(char) * 1024, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_irqsave(&dev->lock, flags); + ret = scnprintf(buf, PAGE_SIZE, + "-*-DATA-*-\n" + "dpkts_tohost:%lu epInPool:%u tx_size:%u drp_cnt:%lu\n" + "dpkts_tomodem:%lu epOutPool:%u rx_size:%u pending:%u\n" + "-*-QMI-*-\n" + "cpkts_tomodem:%lu qmi_req_q:%u cbits:%d\n" + "cpkts_tolaptop:%lu qmi_resp_q:%u notify_cnt:%d\n" + "-*-MISC-*-\n" + "data_ch_status: %lu ctrl_ch_status: %lu\n", + /* data */ + dev->dpkt_tolaptop, 
dev->tx_idle_len, + dev->tx_skb_queue.qlen, dev->tx_drp_cnt, + dev->dpkt_tomodem, dev->rx_idle_len, + dev->rx_skb_queue.qlen, dev->dpkts_pending_atdmux, + /* qmi */ + dev->cpkt_tomodem, dev->qreq_q_len, + dev->cbits_to_modem, + dev->cpkt_tolaptop, dev->qresp_q_len, + atomic_read(&dev->notify_count), + /* misc */ + dev->data_ch_status, dev->ctrl_ch_status); + + spin_unlock_irqrestore(&dev->lock, flags); + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); + + kfree(buf); + + return ret; +} + +static ssize_t rmnet_sdio_reset_stats(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct rmnet_sdio_dev *dev = file->private_data; + + dev->dpkt_tolaptop = 0; + dev->dpkt_tomodem = 0; + dev->cpkt_tolaptop = 0; + dev->cpkt_tomodem = 0; + dev->dpkts_pending_atdmux = 0; + dev->tx_drp_cnt = 0; + + /* TBD: How do we reset skb qlen + * it might have side effects + */ + + return count; +} + +static int debug_rmnet_sdio_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +const struct file_operations debug_rmnet_sdio_stats_ops = { + .open = debug_rmnet_sdio_open, + .read = rmnet_sdio_read_stats, + .write = rmnet_sdio_reset_stats, +}; + +static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) +{ + dev->dent = debugfs_create_dir("usb_rmnet_sdio", 0); + if (IS_ERR(dev->dent)) + return; + + debugfs_create_file("status", 0444, dev->dent, dev, + &debug_rmnet_sdio_stats_ops); +} +#else +static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) +{ + return; +} +#endif + +int rmnet_sdio_function_add(struct usb_configuration *c) +{ + struct rmnet_sdio_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->wq = create_singlethread_workqueue("k_rmnet_work"); + if (!dev->wq) { + ret = -ENOMEM; + goto free_dev; + } + + spin_lock_init(&dev->lock); + atomic_set(&dev->notify_count, 0); + atomic_set(&dev->online, 0); + + INIT_WORK(&dev->disconnect_work, rmnet_sdio_disconnect_work); + INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_sdio_set_modem_cbits_w); + + INIT_WORK(&dev->ctl_rx_work, rmnet_sdio_control_rx_work); + INIT_WORK(&dev->data_rx_work, rmnet_sdio_data_rx_work); + + INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work); + INIT_WORK(&dev->sdio_close_work, rmnet_close_sdio_work); + + INIT_LIST_HEAD(&dev->qmi_req_q); + INIT_LIST_HEAD(&dev->qmi_resp_q); + + INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->tx_idle); + skb_queue_head_init(&dev->tx_skb_queue); + skb_queue_head_init(&dev->rx_skb_queue); + + dev->function.name = "rmnet_sdio"; + dev->function.strings = rmnet_sdio_strings; + dev->function.descriptors = rmnet_sdio_fs_function; + dev->function.hs_descriptors = rmnet_sdio_hs_function; + dev->function.bind = rmnet_sdio_bind; + dev->function.unbind = rmnet_sdio_unbind; + dev->function.setup = rmnet_sdio_setup; + dev->function.set_alt = rmnet_sdio_set_alt; + dev->function.disable = rmnet_sdio_disable; + dev->function.suspend = rmnet_sdio_suspend; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto free_wq; + + rmnet_sdio_debugfs_init(dev); + + return 0; + +free_wq: + destroy_workqueue(dev->wq); +free_dev: + kfree(dev); + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet_smd.c b/drivers/usb/gadget/f_rmnet_smd.c new file mode 100644 index 00000000000..2049dc01fdd --- /dev/null +++ b/drivers/usb/gadget/f_rmnet_smd.c @@ -0,0 +1,1368 @@ +/* + * f_rmnet.c -- RmNet function driver + * + * Copyright (C) 2003-2005,2008 David 
Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 Nokia Corporation + * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "gadget_chips.h" + +#ifndef CONFIG_MSM_SMD +#define CONFIG_RMNET_SMD_CTL_CHANNEL "" +#define CONFIG_RMNET_SMD_DATA_CHANNEL "" +#endif + +static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL; +module_param(rmnet_ctl_ch, charp, S_IRUGO); +MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel"); + +static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL; +module_param(rmnet_data_ch, charp, S_IRUGO); +MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel"); + +#define RMNET_SMD_ACM_CTRL_DTR (1 << 0) + +#define RMNET_SMD_NOTIFY_INTERVAL 5 +#define RMNET_SMD_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification) + +#define QMI_REQ_MAX 4 +#define QMI_REQ_SIZE 2048 +#define QMI_RESP_MAX 8 +#define QMI_RESP_SIZE 2048 + +#define RMNET_RX_REQ_MAX 8 +#define RMNET_RX_REQ_SIZE 2048 +#define RMNET_TX_REQ_MAX 8 +#define RMNET_TX_REQ_SIZE 2048 + +#define RMNET_TXN_MAX 2048 + +/* QMI requests & responses buffer*/ +struct qmi_buf { + void *buf; + int len; + struct list_head list; +}; + +/* Control & data SMD channel private data */ +struct rmnet_smd_ch_info { + struct smd_channel *ch; + struct tasklet_struct tx_tlet; + struct tasklet_struct rx_tlet; +#define CH_OPENED 0 + unsigned long flags; + /* pending rx packet length */ + atomic_t rx_pkt; + /* wait for smd open event*/ + wait_queue_head_t wait; +}; + +struct rmnet_smd_dev { + struct usb_function function; + struct usb_composite_dev *cdev; + + struct usb_ep *epout; + struct usb_ep *epin; + struct usb_ep *epnotify; + struct usb_request *notify_req; + + u8 ifc_id; + /* QMI lists */ + struct list_head qmi_req_pool; + struct list_head qmi_resp_pool; + struct list_head qmi_req_q; + struct list_head qmi_resp_q; + /* Tx/Rx lists */ + struct list_head tx_idle; + struct list_head rx_idle; + struct list_head rx_queue; + + spinlock_t lock; + atomic_t online; + atomic_t notify_count; + + struct platform_driver pdrv; + u8 is_pdrv_used; + struct rmnet_smd_ch_info smd_ctl; + struct rmnet_smd_ch_info smd_data; + + struct workqueue_struct *wq; + struct work_struct connect_work; + struct work_struct disconnect_work; + + unsigned long dpkts_to_host; + unsigned long dpkts_from_modem; + unsigned long dpkts_from_host; + unsigned long dpkts_to_modem; + + unsigned long cpkts_to_host; + unsigned long cpkts_from_modem; + unsigned long cpkts_from_host; + unsigned long cpkts_to_modem; +}; + +static struct rmnet_smd_dev *rmnet_smd; + 
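+/* RmNet is presented to the host as a single vendor-specific interface
+ * with three endpoints: an interrupt IN endpoint that carries the
+ * response-available notifications and a bulk IN/OUT pair for network
+ * data. The full-speed and high-speed descriptor sets below differ only
+ * in endpoint packet sizes and notification interval encoding.
+ */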
+static struct usb_interface_descriptor rmnet_smd_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* Full speed support */ +static struct usb_endpoint_descriptor rmnet_smd_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16( + RMNET_SMD_MAX_NOTIFY_SIZE), + .bInterval = 1 << RMNET_SMD_NOTIFY_INTERVAL, +}; + +static struct usb_endpoint_descriptor rmnet_smd_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_endpoint_descriptor rmnet_smd_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(64), +}; + +static struct usb_descriptor_header *rmnet_smd_fs_function[] = { + (struct usb_descriptor_header *) &rmnet_smd_interface_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_notify_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_in_desc, + (struct usb_descriptor_header *) &rmnet_smd_fs_out_desc, + NULL, +}; + +/* High speed support */ +static struct usb_endpoint_descriptor rmnet_smd_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16( + RMNET_SMD_MAX_NOTIFY_SIZE), + .bInterval = RMNET_SMD_NOTIFY_INTERVAL + 4, +}; + +static struct usb_endpoint_descriptor rmnet_smd_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor rmnet_smd_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(512), +}; + +static struct usb_descriptor_header *rmnet_smd_hs_function[] = { + (struct usb_descriptor_header *) &rmnet_smd_interface_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_notify_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_in_desc, + (struct usb_descriptor_header *) &rmnet_smd_hs_out_desc, + NULL, +}; + +/* String descriptors */ + +static struct usb_string rmnet_smd_string_defs[] = { + [0].s = "QMI RmNet", + { } /* end of list */ +}; + +static struct usb_gadget_strings rmnet_smd_string_table = { + .language = 0x0409, /* en-us */ + .strings = rmnet_smd_string_defs, +}; + +static struct usb_gadget_strings *rmnet_smd_strings[] = { + &rmnet_smd_string_table, + NULL, +}; + +static struct qmi_buf * +rmnet_smd_alloc_qmi(unsigned len, gfp_t kmalloc_flags) +{ + struct qmi_buf *qmi; + + qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags); + if (qmi != NULL) { + qmi->buf = kmalloc(len, kmalloc_flags); + if (qmi->buf == NULL) { + kfree(qmi); + qmi = NULL; + } + } + + return qmi ? 
qmi : ERR_PTR(-ENOMEM); +} + +static void rmnet_smd_free_qmi(struct qmi_buf *qmi) +{ + kfree(qmi->buf); + kfree(qmi); +} +/* + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or a error code if there is an error. + */ +static struct usb_request * +rmnet_smd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + req = NULL; + } + } + + return req ? req : ERR_PTR(-ENOMEM); +} + +/* + * Free a usb_request and its buffer. + */ +static void rmnet_smd_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void rmnet_smd_notify_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&dev->notify_count, 0); + break; + default: + ERROR(cdev, "rmnet notify ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + if (ep != dev->epnotify) + break; + + /* handle multiple pending QMI_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&dev->notify_count)) + break; + + status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", + status); + } + break; + } +} + +static void qmi_smd_response_available(struct rmnet_smd_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req = dev->notify_req; + struct usb_cdc_notification *event = req->buf; + int status; + + /* Response will be sent later */ + if (atomic_inc_return(&dev->notify_count) != 1) + return; + + event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; + event->wValue = cpu_to_le16(0); + event->wIndex = cpu_to_le16(dev->ifc_id); + event->wLength = cpu_to_le16(0); + + status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); + if (status < 0) { + atomic_dec(&dev->notify_count); + ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); + } +} + +/* TODO + * handle modem restart events + */ +static void rmnet_smd_event_notify(void *priv, unsigned event) +{ + struct rmnet_smd_ch_info *smd_info = priv; + int len = atomic_read(&smd_info->rx_pkt); + struct rmnet_smd_dev *dev = + (struct rmnet_smd_dev *) smd_info->tx_tlet.data; + + switch (event) { + case SMD_EVENT_DATA: { + if (!atomic_read(&dev->online)) + break; + if (len && (smd_write_avail(smd_info->ch) >= len)) + tasklet_schedule(&smd_info->rx_tlet); + + if (smd_read_avail(smd_info->ch)) + tasklet_schedule(&smd_info->tx_tlet); + + break; + } + case SMD_EVENT_OPEN: + /* usb endpoints are not enabled untill smd channels + * are opened. wake up worker thread to continue + * connection processing + */ + set_bit(CH_OPENED, &smd_info->flags); + wake_up(&smd_info->wait); + break; + case SMD_EVENT_CLOSE: + /* We will never come here. 
+ * reset flags after closing smd channel + * */ + clear_bit(CH_OPENED, &smd_info->flags); + break; + } +} + +static void rmnet_control_tx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_resp; + int sz; + unsigned long flags; + + while (1) { + sz = smd_cur_packet_size(dev->smd_ctl.ch); + if (sz == 0) + break; + if (smd_read_avail(dev->smd_ctl.ch) < sz) + break; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->qmi_resp_pool)) { + ERROR(cdev, "rmnet QMI Tx buffers full\n"); + spin_unlock_irqrestore(&dev->lock, flags); + break; + } + qmi_resp = list_first_entry(&dev->qmi_resp_pool, + struct qmi_buf, list); + list_del(&qmi_resp->list); + spin_unlock_irqrestore(&dev->lock, flags); + + qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz); + + spin_lock_irqsave(&dev->lock, flags); + dev->cpkts_from_modem++; + list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); + spin_unlock_irqrestore(&dev->lock, flags); + + qmi_smd_response_available(dev); + } + +} + +static void rmnet_control_rx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_req; + int ret; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (1) { + + if (list_empty(&dev->qmi_req_q)) { + atomic_set(&dev->smd_ctl.rx_pkt, 0); + break; + } + qmi_req = list_first_entry(&dev->qmi_req_q, + struct qmi_buf, list); + if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) { + atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len); + DBG(cdev, "rmnet control smd channel full\n"); + break; + } + + list_del(&qmi_req->list); + dev->cpkts_from_host++; + spin_unlock_irqrestore(&dev->lock, flags); + ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len); + spin_lock_irqsave(&dev->lock, flags); + if (ret != qmi_req->len) { + ERROR(cdev, "rmnet control smd write failed\n"); + break; + } + dev->cpkts_to_modem++; + list_add_tail(&qmi_req->list, &dev->qmi_req_pool); + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_smd_command_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + struct qmi_buf *qmi_req; + int ret; + + if (req->status < 0) { + ERROR(cdev, "rmnet command error %d\n", req->status); + return; + } + + spin_lock(&dev->lock); + dev->cpkts_from_host++; + /* no pending control rx packet */ + if (!atomic_read(&dev->smd_ctl.rx_pkt)) { + if (smd_write_avail(dev->smd_ctl.ch) < req->actual) { + atomic_set(&dev->smd_ctl.rx_pkt, req->actual); + goto queue_req; + } + spin_unlock(&dev->lock); + ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual); + /* This should never happen */ + if (ret != req->actual) + ERROR(cdev, "rmnet control smd write failed\n"); + spin_lock(&dev->lock); + dev->cpkts_to_modem++; + spin_unlock(&dev->lock); + return; + } +queue_req: + if (list_empty(&dev->qmi_req_pool)) { + spin_unlock(&dev->lock); + ERROR(cdev, "rmnet QMI pool is empty\n"); + return; + } + + qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list); + list_del(&qmi_req->list); + spin_unlock(&dev->lock); + memcpy(qmi_req->buf, req->buf, req->actual); + qmi_req->len = req->actual; + spin_lock(&dev->lock); + list_add_tail(&qmi_req->list, &dev->qmi_req_q); + spin_unlock(&dev->lock); +} +static void rmnet_txcommand_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = 
req->context; + + spin_lock(&dev->lock); + dev->cpkts_to_host++; + spin_unlock(&dev->lock); +} + +static int +rmnet_smd_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int ret = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + struct qmi_buf *resp; + int schedule = 0; + + if (!atomic_read(&dev->online)) + return -ENOTCONN; + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + ret = w_length; + req->complete = rmnet_smd_command_complete; + req->context = dev; + break; + + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value) + goto invalid; + else { + spin_lock(&dev->lock); + if (list_empty(&dev->qmi_resp_q)) { + INFO(cdev, "qmi resp empty " + " req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + spin_unlock(&dev->lock); + goto invalid; + } + resp = list_first_entry(&dev->qmi_resp_q, + struct qmi_buf, list); + list_del(&resp->list); + spin_unlock(&dev->lock); + memcpy(req->buf, resp->buf, resp->len); + ret = resp->len; + spin_lock(&dev->lock); + + if (list_empty(&dev->qmi_resp_pool)) + schedule = 1; + list_add_tail(&resp->list, &dev->qmi_resp_pool); + + if (schedule) + tasklet_schedule(&dev->smd_ctl.tx_tlet); + spin_unlock(&dev->lock); + req->complete = rmnet_txcommand_complete; + req->context = dev; + } + break; + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + /* This is a workaround for RmNet and is borrowed from the + * CDC/ACM standard. The host driver will issue the above ACM + * standard request to the RmNet interface in the following + * scenario: Once the network adapter is disabled from device + * manager, the above request will be sent from the qcusbnet + * host driver, with DTR being '0'. Once network adapter is + * enabled from device manager (or during enumeration), the + * request will be sent with DTR being '1'. + */ + if (w_value & RMNET_SMD_ACM_CTRL_DTR) + ret = smd_tiocmset(dev->smd_ctl.ch, TIOCM_DTR, 0); + else + ret = smd_tiocmset(dev->smd_ctl.ch, 0, TIOCM_DTR); + + break; + default: + +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (ret >= 0) { + VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = ret; + ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (ret < 0) + ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); + } + + return ret; +} + +static void rmnet_smd_start_rx(struct rmnet_smd_dev *dev) +{ + struct usb_composite_dev *cdev = dev->cdev; + int status; + struct usb_request *req; + struct list_head *pool = &dev->rx_idle; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (!list_empty(pool)) { + req = list_entry(pool->next, struct usb_request, list); + list_del(&req->list); + + spin_unlock_irqrestore(&dev->lock, flags); + status = usb_ep_queue(dev->epout, req, GFP_ATOMIC); + spin_lock_irqsave(&dev->lock, flags); + + if (status) { + ERROR(cdev, "rmnet data rx enqueue err %d\n", status); + list_add_tail(&req->list, pool); + break; + } + } + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void rmnet_data_tx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int status; + int sz; + unsigned long flags; + + while (1) { + + sz = smd_cur_packet_size(dev->smd_data.ch); + if (sz == 0) + break; + if (smd_read_avail(dev->smd_data.ch) < sz) + break; + + spin_lock_irqsave(&dev->lock, flags); + if (list_empty(&dev->tx_idle)) { + spin_unlock_irqrestore(&dev->lock, flags); + DBG(cdev, "rmnet data Tx buffers full\n"); + break; + } + req = list_first_entry(&dev->tx_idle, struct usb_request, list); + list_del(&req->list); + spin_unlock_irqrestore(&dev->lock, flags); + + req->length = smd_read(dev->smd_data.ch, req->buf, sz); + status = usb_ep_queue(dev->epin, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "rmnet tx data enqueue err %d\n", status); + spin_lock_irqsave(&dev->lock, flags); + list_add_tail(&req->list, &dev->tx_idle); + spin_unlock_irqrestore(&dev->lock, flags); + break; + } + spin_lock_irqsave(&dev->lock, flags); + dev->dpkts_from_modem++; + spin_unlock_irqrestore(&dev->lock, flags); + } + +} + +static void rmnet_data_rx_tlet(unsigned long arg) +{ + struct rmnet_smd_dev *dev = (struct rmnet_smd_dev *) arg; + struct usb_composite_dev *cdev = dev->cdev; + struct usb_request *req; + int ret; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + while (1) { + if (list_empty(&dev->rx_queue)) { + atomic_set(&dev->smd_data.rx_pkt, 0); + break; + } + req = list_first_entry(&dev->rx_queue, + struct usb_request, list); + if (smd_write_avail(dev->smd_data.ch) < req->actual) { + atomic_set(&dev->smd_data.rx_pkt, req->actual); + DBG(cdev, "rmnet SMD data channel full\n"); + break; + } + + list_del(&req->list); + spin_unlock_irqrestore(&dev->lock, flags); + ret = smd_write(dev->smd_data.ch, req->buf, req->actual); + spin_lock_irqsave(&dev->lock, flags); + if (ret != req->actual) { + ERROR(cdev, "rmnet SMD data write failed\n"); + break; + } + dev->dpkts_to_modem++; + list_add_tail(&req->list, &dev->rx_idle); + } + spin_unlock_irqrestore(&dev->lock, flags); + + /* We have free rx data requests. */ + rmnet_smd_start_rx(dev); +} + +/* If SMD has enough room to accommodate a data rx packet, + * write into SMD directly. Otherwise enqueue to rx_queue. + * We will not write into SMD directly untill rx_queue is + * empty to strictly follow the ordering requests. 
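+ * rmnet_data_rx_tlet() drains rx_queue in FIFO order once SMD signals
+ * (via SMD_EVENT_DATA) that enough write space is available.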
+ */ +static void rmnet_smd_complete_epout(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + int ret; + + switch (status) { + case 0: + /* normal completion */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + return; + default: + /* unexpected failure */ + ERROR(cdev, "RMNET %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + return; + } + + spin_lock(&dev->lock); + dev->dpkts_from_host++; + if (!atomic_read(&dev->smd_data.rx_pkt)) { + if (smd_write_avail(dev->smd_data.ch) < req->actual) { + atomic_set(&dev->smd_data.rx_pkt, req->actual); + goto queue_req; + } + spin_unlock(&dev->lock); + ret = smd_write(dev->smd_data.ch, req->buf, req->actual); + /* This should never happen */ + if (ret != req->actual) + ERROR(cdev, "rmnet data smd write failed\n"); + /* Restart Rx */ + spin_lock(&dev->lock); + dev->dpkts_to_modem++; + list_add_tail(&req->list, &dev->rx_idle); + spin_unlock(&dev->lock); + rmnet_smd_start_rx(dev); + return; + } +queue_req: + list_add_tail(&req->list, &dev->rx_queue); + spin_unlock(&dev->lock); +} + +static void rmnet_smd_complete_epin(struct usb_ep *ep, struct usb_request *req) +{ + struct rmnet_smd_dev *dev = req->context; + struct usb_composite_dev *cdev = dev->cdev; + int status = req->status; + int schedule = 0; + + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + spin_lock(&dev->lock); + list_add_tail(&req->list, &dev->tx_idle); + spin_unlock(&dev->lock); + break; + default: + ERROR(cdev, "rmnet data tx ep error %d\n", status); + /* FALLTHROUGH */ + case 0: + spin_lock(&dev->lock); + if (list_empty(&dev->tx_idle)) + schedule = 1; + list_add_tail(&req->list, &dev->tx_idle); + dev->dpkts_to_host++; + if (schedule) + tasklet_schedule(&dev->smd_data.tx_tlet); + spin_unlock(&dev->lock); + break; + } + +} + +static void rmnet_smd_disconnect_work(struct work_struct *w) +{ + struct qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev, + disconnect_work); + + tasklet_kill(&dev->smd_ctl.rx_tlet); + tasklet_kill(&dev->smd_ctl.tx_tlet); + tasklet_kill(&dev->smd_data.rx_tlet); + tasklet_kill(&dev->smd_data.tx_tlet); + + smd_close(dev->smd_ctl.ch); + dev->smd_ctl.flags = 0; + + smd_close(dev->smd_data.ch); + dev->smd_data.flags = 0; + + atomic_set(&dev->notify_count, 0); + + list_for_each_safe(act, tmp, &dev->rx_queue) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + list_add_tail(&req->list, &dev->rx_idle); + } + + list_for_each_safe(act, tmp, &dev->qmi_req_q) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + list_add_tail(&qmi->list, &dev->qmi_req_pool); + } + + list_for_each_safe(act, tmp, &dev->qmi_resp_q) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + list_add_tail(&qmi->list, &dev->qmi_resp_pool); + } + + if (dev->is_pdrv_used) { + platform_driver_unregister(&dev->pdrv); + dev->is_pdrv_used = 0; + } +} + +/* SMD close may sleep + * schedule a work to close smd channels + */ +static void rmnet_smd_disable(struct usb_function *f) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + + if 
(!atomic_read(&dev->online)) + return; + + atomic_set(&dev->online, 0); + + usb_ep_fifo_flush(dev->epnotify); + usb_ep_disable(dev->epnotify); + usb_ep_fifo_flush(dev->epout); + usb_ep_disable(dev->epout); + + usb_ep_fifo_flush(dev->epin); + usb_ep_disable(dev->epin); + + /* cleanup work */ + queue_work(dev->wq, &dev->disconnect_work); +} + +static void rmnet_smd_connect_work(struct work_struct *w) +{ + struct rmnet_smd_dev *dev = container_of(w, struct rmnet_smd_dev, + connect_work); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + /* Control channel for QMI messages */ + ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch, + &dev->smd_ctl, rmnet_smd_event_notify); + if (ret) { + ERROR(cdev, "Unable to open control smd channel: %d\n", ret); + /* + * Register platform driver to be notified in case SMD channels + * later becomes ready to be opened. + */ + ret = platform_driver_register(&dev->pdrv); + if (ret) + ERROR(cdev, "Platform driver %s register failed %d\n", + dev->pdrv.driver.name, ret); + else + dev->is_pdrv_used = 1; + + return; + } + wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED, + &dev->smd_ctl.flags)); + + /* Data channel for network packets */ + ret = smd_open(rmnet_data_ch, &dev->smd_data.ch, + &dev->smd_data, rmnet_smd_event_notify); + if (ret) { + ERROR(cdev, "Unable to open data smd channel\n"); + smd_close(dev->smd_ctl.ch); + return; + } + wait_event(dev->smd_data.wait, test_bit(CH_OPENED, + &dev->smd_data.flags)); + + atomic_set(&dev->online, 1); + /* Queue Rx data requests */ + rmnet_smd_start_rx(dev); +} + +static int rmnet_smd_ch_probe(struct platform_device *pdev) +{ + DBG(rmnet_smd->cdev, "Probe called for device: %s\n", pdev->name); + + queue_work(rmnet_smd->wq, &rmnet_smd->connect_work); + + return 0; +} + +/* SMD open may sleep. + * Schedule a work to open smd channels and enable + * endpoints if smd channels are opened successfully. 
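+ * The connect work also marks the function online and queues the
+ * initial set of bulk OUT requests via rmnet_smd_start_rx().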
+ */ +static int rmnet_smd_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + struct usb_composite_dev *cdev = dev->cdev; + int ret = 0; + + ret = usb_ep_enable(dev->epin, ep_choose(cdev->gadget, + &rmnet_smd_hs_in_desc, + &rmnet_smd_fs_in_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epin->name, ret); + return ret; + } + ret = usb_ep_enable(dev->epout, ep_choose(cdev->gadget, + &rmnet_smd_hs_out_desc, + &rmnet_smd_fs_out_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epout->name, ret); + usb_ep_disable(dev->epin); + return ret; + } + + ret = usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget, + &rmnet_smd_hs_notify_desc, + &rmnet_smd_fs_notify_desc)); + if (ret) { + ERROR(cdev, "can't enable %s, result %d\n", + dev->epnotify->name, ret); + usb_ep_disable(dev->epin); + usb_ep_disable(dev->epout); + return ret; + } + + queue_work(dev->wq, &dev->connect_work); + return 0; +} + +static void rmnet_smd_free_buf(struct rmnet_smd_dev *dev) +{ + struct qmi_buf *qmi; + struct usb_request *req; + struct list_head *act, *tmp; + + dev->dpkts_to_host = 0; + dev->dpkts_from_modem = 0; + dev->dpkts_from_host = 0; + dev->dpkts_to_modem = 0; + + dev->cpkts_to_host = 0; + dev->cpkts_from_modem = 0; + dev->cpkts_from_host = 0; + dev->cpkts_to_modem = 0; + /* free all usb requests in tx pool */ + list_for_each_safe(act, tmp, &dev->tx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + rmnet_smd_free_req(dev->epout, req); + } + + /* free all usb requests in rx pool */ + list_for_each_safe(act, tmp, &dev->rx_idle) { + req = list_entry(act, struct usb_request, list); + list_del(&req->list); + rmnet_smd_free_req(dev->epin, req); + } + + /* free all buffers in qmi request pool */ + list_for_each_safe(act, tmp, &dev->qmi_req_pool) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + rmnet_smd_free_qmi(qmi); + } + + /* free all buffers in qmi response pool */ + list_for_each_safe(act, tmp, &dev->qmi_resp_pool) { + qmi = list_entry(act, struct qmi_buf, list); + list_del(&qmi->list); + rmnet_smd_free_qmi(qmi); + } + + rmnet_smd_free_req(dev->epnotify, dev->notify_req); +} +static int rmnet_smd_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + int i, id, ret; + struct qmi_buf *qmi; + struct usb_request *req; + struct usb_ep *ep; + + dev->cdev = cdev; + + /* allocate interface ID */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + dev->ifc_id = id; + rmnet_smd_interface_desc.bInterfaceNumber = id; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_in_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* claim endpoint */ + dev->epin = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_out_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* claim endpoint */ + dev->epout = ep; + + ep = usb_ep_autoconfig(cdev->gadget, &rmnet_smd_fs_notify_desc); + if (!ep) + return -ENODEV; + ep->driver_data = cdev; /* claim endpoint */ + dev->epnotify = ep; + + /* support all relevant hardware speeds... 
we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + rmnet_smd_hs_in_desc.bEndpointAddress = + rmnet_smd_fs_in_desc.bEndpointAddress; + rmnet_smd_hs_out_desc.bEndpointAddress = + rmnet_smd_fs_out_desc.bEndpointAddress; + rmnet_smd_hs_notify_desc.bEndpointAddress = + rmnet_smd_fs_notify_desc.bEndpointAddress; + + } + + /* allocate notification */ + dev->notify_req = rmnet_smd_alloc_req(dev->epnotify, + RMNET_SMD_MAX_NOTIFY_SIZE, GFP_KERNEL); + if (IS_ERR(dev->notify_req)) + return PTR_ERR(dev->notify_req); + + dev->notify_req->complete = rmnet_smd_notify_complete; + dev->notify_req->context = dev; + dev->notify_req->length = RMNET_SMD_MAX_NOTIFY_SIZE; + + /* Allocate the qmi request and response buffers */ + for (i = 0; i < QMI_REQ_MAX; i++) { + qmi = rmnet_smd_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL); + if (IS_ERR(qmi)) { + ret = PTR_ERR(qmi); + goto free_buf; + } + list_add_tail(&qmi->list, &dev->qmi_req_pool); + } + + for (i = 0; i < QMI_RESP_MAX; i++) { + qmi = rmnet_smd_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL); + if (IS_ERR(qmi)) { + ret = PTR_ERR(qmi); + goto free_buf; + } + list_add_tail(&qmi->list, &dev->qmi_resp_pool); + } + + /* Allocate bulk in/out requests for data transfer */ + for (i = 0; i < RMNET_RX_REQ_MAX; i++) { + req = rmnet_smd_alloc_req(dev->epout, RMNET_RX_REQ_SIZE, + GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + goto free_buf; + } + req->length = RMNET_TXN_MAX; + req->context = dev; + req->complete = rmnet_smd_complete_epout; + list_add_tail(&req->list, &dev->rx_idle); + } + + for (i = 0; i < RMNET_TX_REQ_MAX; i++) { + req = rmnet_smd_alloc_req(dev->epin, RMNET_TX_REQ_SIZE, + GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + goto free_buf; + } + req->context = dev; + req->complete = rmnet_smd_complete_epin; + list_add_tail(&req->list, &dev->tx_idle); + } + + return 0; + +free_buf: + rmnet_smd_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +static ssize_t rmnet_smd_debug_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct rmnet_smd_dev *dev = file->private_data; + struct rmnet_smd_ch_info smd_ctl_info = dev->smd_ctl; + struct rmnet_smd_ch_info smd_data_info = dev->smd_data; + char *buf; + unsigned long flags; + int ret; + + buf = kzalloc(sizeof(char) * 512, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock_irqsave(&dev->lock, flags); + ret = scnprintf(buf, 512, + "smd_control_ch_opened: %lu\n" + "smd_data_ch_opened: %lu\n" + "usb online : %d\n" + "dpkts_from_modem: %lu\n" + "dpkts_to_host: %lu\n" + "pending_dpkts_to_host: %lu\n" + "dpkts_from_host: %lu\n" + "dpkts_to_modem: %lu\n" + "pending_dpkts_to_modem: %lu\n" + "cpkts_from_modem: %lu\n" + "cpkts_to_host: %lu\n" + "pending_cpkts_to_host: %lu\n" + "cpkts_from_host: %lu\n" + "cpkts_to_modem: %lu\n" + "pending_cpkts_to_modem: %lu\n" + "smd_read_avail_ctrl: %d\n" + "smd_write_avail_ctrl: %d\n" + "smd_read_avail_data: %d\n" + "smd_write_avail_data: %d\n", + smd_ctl_info.flags, smd_data_info.flags, + atomic_read(&dev->online), + dev->dpkts_from_modem, dev->dpkts_to_host, + (dev->dpkts_from_modem - dev->dpkts_to_host), + dev->dpkts_from_host, dev->dpkts_to_modem, + (dev->dpkts_from_host - dev->dpkts_to_modem), + dev->cpkts_from_modem, dev->cpkts_to_host, + (dev->cpkts_from_modem - dev->cpkts_to_host), + dev->cpkts_from_host, dev->cpkts_to_modem, + (dev->cpkts_from_host - 
dev->cpkts_to_modem), + smd_read_avail(dev->smd_ctl.ch), + smd_write_avail(dev->smd_ctl.ch), + smd_read_avail(dev->smd_data.ch), + smd_write_avail(dev->smd_data.ch)); + + spin_unlock_irqrestore(&dev->lock, flags); + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); + + kfree(buf); + + return ret; +} + +static ssize_t rmnet_smd_debug_reset_stats(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct rmnet_smd_dev *dev = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + + dev->dpkts_to_host = 0; + dev->dpkts_from_modem = 0; + dev->dpkts_from_host = 0; + dev->dpkts_to_modem = 0; + + dev->cpkts_to_host = 0; + dev->cpkts_from_modem = 0; + dev->cpkts_from_host = 0; + dev->cpkts_to_modem = 0; + + spin_unlock_irqrestore(&dev->lock, flags); + + return count; +} + +static int rmnet_smd_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +const struct file_operations rmnet_smd_debug_stats_ops = { + .open = rmnet_smd_debug_open, + .read = rmnet_smd_debug_read_stats, + .write = rmnet_smd_debug_reset_stats, +}; + +struct dentry *dent_smd; +struct dentry *dent_smd_status; + +static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) +{ + + dent_smd = debugfs_create_dir("usb_rmnet_smd", 0); + if (IS_ERR(dent_smd)) + return; + + dent_smd_status = debugfs_create_file("status", 0444, dent_smd, dev, + &rmnet_smd_debug_stats_ops); + + if (!dent_smd_status) { + debugfs_remove(dent_smd); + dent_smd = NULL; + return; + } + + return; +} +#else +static void rmnet_smd_debugfs_init(struct rmnet_smd_dev *dev) {} +#endif + +static void +rmnet_smd_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct rmnet_smd_dev *dev = container_of(f, struct rmnet_smd_dev, + function); + + tasklet_kill(&dev->smd_ctl.rx_tlet); + tasklet_kill(&dev->smd_ctl.tx_tlet); + tasklet_kill(&dev->smd_data.rx_tlet); + tasklet_kill(&dev->smd_data.tx_tlet); + + flush_workqueue(dev->wq); + rmnet_smd_free_buf(dev); + dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */ + + destroy_workqueue(dev->wq); + debugfs_remove_recursive(dent_smd); + kfree(dev); + +} + +int rmnet_smd_bind_config(struct usb_configuration *c) +{ + struct rmnet_smd_dev *dev; + int ret; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + rmnet_smd = dev; + + dev->wq = create_singlethread_workqueue("k_rmnet_work"); + if (!dev->wq) { + ret = -ENOMEM; + goto free_dev; + } + + spin_lock_init(&dev->lock); + atomic_set(&dev->notify_count, 0); + atomic_set(&dev->online, 0); + atomic_set(&dev->smd_ctl.rx_pkt, 0); + atomic_set(&dev->smd_data.rx_pkt, 0); + + INIT_WORK(&dev->connect_work, rmnet_smd_connect_work); + INIT_WORK(&dev->disconnect_work, rmnet_smd_disconnect_work); + + tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet, + (unsigned long) dev); + tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet, + (unsigned long) dev); + + init_waitqueue_head(&dev->smd_ctl.wait); + init_waitqueue_head(&dev->smd_data.wait); + + dev->pdrv.probe = rmnet_smd_ch_probe; + dev->pdrv.driver.name = CONFIG_RMNET_SMD_CTL_CHANNEL; + dev->pdrv.driver.owner = THIS_MODULE; + + INIT_LIST_HEAD(&dev->qmi_req_pool); + INIT_LIST_HEAD(&dev->qmi_req_q); + INIT_LIST_HEAD(&dev->qmi_resp_pool); + INIT_LIST_HEAD(&dev->qmi_resp_q); + 
INIT_LIST_HEAD(&dev->rx_idle); + INIT_LIST_HEAD(&dev->rx_queue); + INIT_LIST_HEAD(&dev->tx_idle); + + dev->function.name = "rmnet"; + dev->function.strings = rmnet_smd_strings; + dev->function.descriptors = rmnet_smd_fs_function; + dev->function.hs_descriptors = rmnet_smd_hs_function; + dev->function.bind = rmnet_smd_bind; + dev->function.unbind = rmnet_smd_unbind; + dev->function.setup = rmnet_smd_setup; + dev->function.set_alt = rmnet_smd_set_alt; + dev->function.disable = rmnet_smd_disable; + + ret = usb_add_function(c, &dev->function); + if (ret) + goto free_wq; + + rmnet_smd_debugfs_init(dev); + + return 0; + +free_wq: + destroy_workqueue(dev->wq); +free_dev: + kfree(dev); + + return ret; +} diff --git a/drivers/usb/gadget/f_rmnet_smd_sdio.c b/drivers/usb/gadget/f_rmnet_smd_sdio.c index 0cac681b567..e39125d5761 100644 --- a/drivers/usb/gadget/f_rmnet_smd_sdio.c +++ b/drivers/usb/gadget/f_rmnet_smd_sdio.c @@ -42,6 +42,7 @@ #include #include #include +#include #ifdef CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL static uint32_t rmnet_mux_sdio_ctl_ch = CONFIG_RMNET_SMD_SDIO_CTL_CHANNEL; @@ -104,12 +105,6 @@ struct rmnet_mux_ctrl_pkt { struct list_head list; }; -enum usb_rmnet_mux_xport_type { - USB_RMNET_MUX_XPORT_UNDEFINED, - USB_RMNET_MUX_XPORT_SDIO, - USB_RMNET_MUX_XPORT_SMD, -}; - struct rmnet_mux_ctrl_dev { struct list_head tx_q; wait_queue_head_t tx_wait_q; @@ -176,7 +171,7 @@ struct rmnet_mux_dev { struct rmnet_mux_ctrl_dev ctrl_dev; u8 ifc_id; - enum usb_rmnet_mux_xport_type xport; + enum transport_type xport; spinlock_t lock; atomic_t online; atomic_t notify_count; @@ -291,18 +286,6 @@ static struct usb_gadget_strings *rmnet_mux_strings[] = { NULL, }; -static char *xport_to_str(enum usb_rmnet_mux_xport_type t) -{ - switch (t) { - case USB_RMNET_MUX_XPORT_SDIO: - return "SDIO"; - case USB_RMNET_MUX_XPORT_SMD: - return "SMD"; - default: - return "UNDEFINED"; - } -} - static struct rmnet_mux_ctrl_pkt *rmnet_mux_alloc_ctrl_pkt(unsigned len, gfp_t flags) { @@ -556,7 +539,7 @@ rmnet_mux_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req) int status = req->status; int queue = 0; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { dev_kfree_skb_any(skb); req->buf = 0; rmnet_mux_free_req(ep, req); @@ -614,7 +597,7 @@ rmnet_mux_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req) struct usb_composite_dev *cdev = dev->cdev; int status = req->status; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { dev_kfree_skb_any(skb); req->buf = 0; rmnet_mux_free_req(ep, req); @@ -797,7 +780,7 @@ rmnet_mux_smd_complete_epout(struct usb_ep *ep, struct usb_request *req) int status = req->status; int ret; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { rmnet_mux_free_req(ep, req); return; } @@ -857,7 +840,7 @@ static void rmnet_mux_smd_complete_epin(struct usb_ep *ep, int status = req->status; int schedule = 0; - if (dev->xport == USB_RMNET_MUX_XPORT_UNDEFINED) { + if (dev->xport == USB_GADGET_XPORT_UNDEF) { rmnet_mux_free_req(ep, req); return; } @@ -1209,7 +1192,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &sdio_dev->rx_idle; @@ -1218,7 +1201,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) req = list_first_entry(pool, struct usb_request, 
list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } while ((skb = __skb_dequeue(&sdio_dev->tx_skb_queue))) @@ -1232,7 +1215,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &smd_dev->rx_idle; @@ -1240,7 +1223,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* free all usb requests in SMD rx queue */ @@ -1248,7 +1231,7 @@ static void rmnet_mux_free_buf(struct rmnet_mux_dev *dev) while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } pool = &ctrl_dev->tx_q; @@ -1276,7 +1259,7 @@ static void rmnet_mux_disconnect_work(struct work_struct *w) struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; struct rmnet_mux_ctrl_dev *ctrl_dev = &dev->ctrl_dev; - if (dev->xport == USB_RMNET_MUX_XPORT_SMD) { + if (dev->xport == USB_GADGET_XPORT_SMD) { tasklet_kill(&smd_dev->smd_data.rx_tlet); tasklet_kill(&smd_dev->smd_data.tx_tlet); } @@ -1414,8 +1397,8 @@ static ssize_t transport_store( { struct rmnet_mux_dev *dev = rmux_dev; int value; - enum usb_rmnet_mux_xport_type given_xport; - enum usb_rmnet_mux_xport_type t; + enum transport_type given_xport; + enum transport_type t; struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; struct rmnet_mux_sdio_dev *sdio_dev = &dev->sdio_dev; struct list_head *pool; @@ -1425,15 +1408,15 @@ static ssize_t transport_store( unsigned long flags; if (!atomic_read(&dev->online)) { - pr_debug("%s: usb cable is not connected\n", __func__); + pr_err("%s: usb cable is not connected\n", __func__); return -EINVAL; } sscanf(buf, "%d", &value); if (value) - given_xport = USB_RMNET_MUX_XPORT_SDIO; + given_xport = USB_GADGET_XPORT_SDIO; else - given_xport = USB_RMNET_MUX_XPORT_SMD; + given_xport = USB_GADGET_XPORT_SMD; if (given_xport == dev->xport) { pr_err("%s: given_xport:%s cur_xport:%s doing nothing\n", @@ -1447,14 +1430,14 @@ static ssize_t transport_store( /* prevent any other pkts to/from usb */ t = dev->xport; - dev->xport = USB_RMNET_MUX_XPORT_UNDEFINED; - if (t != USB_RMNET_MUX_XPORT_UNDEFINED) { + dev->xport = USB_GADGET_XPORT_UNDEF; + if (t != USB_GADGET_XPORT_UNDEF) { usb_ep_fifo_flush(dev->epin); usb_ep_fifo_flush(dev->epout); } switch (t) { - case USB_RMNET_MUX_XPORT_SDIO: + case USB_GADGET_XPORT_SDIO: spin_lock_irqsave(&dev->lock, flags); /* tx_idle */ @@ -1465,7 +1448,7 @@ static ssize_t transport_store( req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } /* rx_idle */ @@ -1475,7 +1458,7 @@ static ssize_t transport_store( req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); req->buf = NULL; - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* tx_skb_queue */ @@ -1489,7 +1472,7 @@ static ssize_t transport_store( spin_unlock_irqrestore(&dev->lock, flags); break; - case USB_RMNET_MUX_XPORT_SMD: + case USB_GADGET_XPORT_SMD: /* close smd xport */ tasklet_kill(&smd_dev->smd_data.rx_tlet); 
tasklet_kill(&smd_dev->smd_data.tx_tlet); @@ -1500,7 +1483,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epin, req); + rmnet_mux_free_req(dev->epout, req); } pool = &smd_dev->rx_idle; @@ -1508,7 +1491,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } /* free all usb requests in SMD rx queue */ @@ -1516,7 +1499,7 @@ static ssize_t transport_store( while (!list_empty(pool)) { req = list_first_entry(pool, struct usb_request, list); list_del(&req->list); - rmnet_mux_free_req(dev->epout, req); + rmnet_mux_free_req(dev->epin, req); } spin_unlock_irqrestore(&dev->lock, flags); @@ -1528,10 +1511,10 @@ static ssize_t transport_store( dev->xport = given_xport; switch (dev->xport) { - case USB_RMNET_MUX_XPORT_SDIO: + case USB_GADGET_XPORT_SDIO: rmnet_mux_sdio_enable(dev); break; - case USB_RMNET_MUX_XPORT_SMD: + case USB_GADGET_XPORT_SMD: rmnet_mux_smd_enable(dev); break; default: @@ -1640,14 +1623,11 @@ static void rmnet_mux_sdio_init(struct rmnet_mux_sdio_dev *sdio_dev) static void rmnet_mux_unbind(struct usb_configuration *c, struct usb_function *f) { -/* Do not clear flags to avoid SMD open status mismatch */ -#if 0 struct rmnet_mux_dev *dev = container_of(f, struct rmnet_mux_dev, function); struct rmnet_mux_smd_dev *smd_dev = &dev->smd_dev; smd_dev->smd_data.flags = 0; -#endif } #if defined(CONFIG_DEBUG_FS) @@ -1947,7 +1927,6 @@ static int rmnet_mux_ctrl_device_init(struct rmnet_mux_dev *dev) static int rmnet_smd_sdio_function_add(struct usb_configuration *c) { struct rmnet_mux_dev *dev = rmux_dev; - int status; if (!dev) return -ENODEV; @@ -1965,16 +1944,6 @@ static int rmnet_smd_sdio_function_add(struct usb_configuration *c) dev->function.disable = rmnet_mux_disable; dev->function.suspend = rmnet_mux_suspend; - if (rmnet_mux_string_defs[0].id == 0) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - rmnet_mux_string_defs[0].id = status; - rmnet_mux_interface_desc.iInterface = status; - } - return usb_add_function(c, &dev->function); } diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 7a184326bb1..259d7f3509b 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -98,8 +98,6 @@ struct f_rndis { struct usb_endpoint_descriptor *notify_desc; struct usb_request *notify_req; atomic_t notify_count; - - atomic_t online; }; static inline struct f_rndis *func_to_rndis(struct usb_function *f) @@ -391,24 +389,29 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) { struct f_rndis *rndis = req->context; + struct usb_composite_dev *cdev = rndis->port.func.config->cdev; int status; - - if (req->status < 0) { - pr_err("%s: staus error: %d\n", __func__, req->status); - return; - } - - if (!atomic_read(&rndis->online)) { - pr_warning("%s: usb rndis is not online\n", __func__); - return; - } + rndis_init_msg_type *buf; /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->config, (u8 *) req->buf); if (status < 0) - pr_err("[USB] RNDIS command error %d, %d/%d\n", + ERROR(cdev, "RNDIS command error %d, 
%d/%d\n", status, req->actual, req->length); + + buf = (rndis_init_msg_type *)req->buf; + + if (buf->MessageType == REMOTE_NDIS_INITIALIZE_MSG) { + if (buf->MaxTransferSize > 2048) + rndis->port.multi_pkt_xfer = 1; + else + rndis->port.multi_pkt_xfer = 0; + DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n", + __func__, buf->MaxTransferSize, + rndis->port.multi_pkt_xfer ? "enabled" : + "disabled"); + } // spin_unlock(&dev->lock); } @@ -423,12 +426,6 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); - if (!atomic_read(&rndis->online)) { - pr_warning("%s: usb rndis is not online\n", __func__); - return -ENOTCONN; - } - - /* composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). */ @@ -555,7 +552,6 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } else goto fail; - atomic_set(&rndis->online, 1); return 0; fail: return -EINVAL; @@ -566,8 +562,6 @@ static void rndis_disable(struct usb_function *f) struct f_rndis *rndis = func_to_rndis(f); struct usb_composite_dev *cdev = f->config->cdev; - atomic_set(&rndis->online, 0); - if (!rndis->notify->driver_data) return; @@ -775,6 +769,8 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f) rndis_deregister(rndis->config); rndis_exit(); + rndis_string_defs[0].id = 0; + if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); @@ -814,14 +810,14 @@ rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], if (!can_support_rndis(c) || !ethaddr) return -EINVAL; - /* setup RNDIS itself */ - status = rndis_init(); - if (status < 0) - return status; - /* maybe allocate device-global string IDs */ if (rndis_string_defs[0].id == 0) { + /* ... and setup RNDIS itself */ + status = rndis_init(); + if (status < 0) + return status; + /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c index 52b974344cf..de8c8ed51b3 100644 --- a/drivers/usb/gadget/f_serial.c +++ b/drivers/usb/gadget/f_serial.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include "u_serial.h" #include "gadget_chips.h" @@ -27,6 +27,7 @@ * CDC ACM driver. However, for many purposes it's just as functional * if you can arrange appropriate host side drivers. */ +#define GSERIAL_NO_PORTS 2 struct gser_descs { struct usb_endpoint_descriptor *in; @@ -75,18 +76,18 @@ struct f_gser { static unsigned int no_tty_ports; static unsigned int no_sdio_ports; static unsigned int no_smd_ports; +static unsigned int no_hsic_sports; static unsigned int nr_ports; static struct port_info { enum transport_type transport; - enum fserial_func_type func_type; unsigned port_num; unsigned client_port_num; } gserial_ports[GSERIAL_NO_PORTS]; static inline bool is_transport_sdio(enum transport_type t) { - if (t == USB_GADGET_FSERIAL_TRANSPORT_SDIO) + if (t == USB_GADGET_XPORT_SDIO) return 1; return 0; } @@ -183,14 +184,10 @@ static struct usb_endpoint_descriptor gser_fs_out_desc = { static struct usb_descriptor_header *gser_fs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, #ifdef CONFIG_MODEM_SUPPORT -/* These descriptors may not be recognized by some OS. Mark it. 
- */ -/* (struct usb_descriptor_header *) &gser_header_desc, (struct usb_descriptor_header *) &gser_call_mgmt_descriptor, (struct usb_descriptor_header *) &gser_descriptor, (struct usb_descriptor_header *) &gser_union_desc, -*/ (struct usb_descriptor_header *) &gser_fs_notify_desc, #endif (struct usb_descriptor_header *) &gser_fs_in_desc, @@ -227,14 +224,10 @@ static struct usb_endpoint_descriptor gser_hs_out_desc = { static struct usb_descriptor_header *gser_hs_function[] = { (struct usb_descriptor_header *) &gser_interface_desc, #ifdef CONFIG_MODEM_SUPPORT -/* These descriptors may not be recognized by some OS. Mark it. - */ -/* (struct usb_descriptor_header *) &gser_header_desc, (struct usb_descriptor_header *) &gser_call_mgmt_descriptor, (struct usb_descriptor_header *) &gser_descriptor, (struct usb_descriptor_header *) &gser_union_desc, -*/ (struct usb_descriptor_header *) &gser_hs_notify_desc, #endif (struct usb_descriptor_header *) &gser_hs_in_desc, @@ -243,24 +236,9 @@ static struct usb_descriptor_header *gser_hs_function[] = { }; /* string descriptors: */ -static struct usb_string modem_string_defs[] = { - [0].s = "HTC Modem", - [1].s = "HTC 9k Modem", - { } /* end of list */ -}; - -static struct usb_gadget_strings modem_string_table = { - .language = 0x0409, /* en-us */ - .strings = modem_string_defs, -}; - -static struct usb_gadget_strings *modem_strings[] = { - &modem_string_table, - NULL, -}; static struct usb_string gser_string_defs[] = { - [0].s = "HTC Serial", + [0].s = "Generic Serial", { } /* end of list */ }; @@ -274,51 +252,16 @@ static struct usb_gadget_strings *gser_strings[] = { NULL, }; -static char *transport_to_str(enum transport_type t) -{ - switch (t) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: - return "TTY"; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: - return "SDIO"; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: - return "SMD"; - } - - return "NONE"; -} - -static enum transport_type serial_str_to_transport(const char *name) -{ - if (!strcasecmp("SDIO", name)) - return USB_GADGET_FSERIAL_TRANSPORT_SDIO; - if (!strcasecmp("SMD", name)) - return USB_GADGET_FSERIAL_TRANSPORT_SMD; - - return USB_GADGET_FSERIAL_TRANSPORT_TTY; -} - -static enum fserial_func_type serial_str_to_func_type(const char *name) -{ - if (!name) - return USB_FSER_FUNC_NONE; - - if (!strcasecmp("MODEM", name)) - return USB_FSER_FUNC_MODEM; - if (!strcasecmp("MODEM_MDM", name)) - return USB_FSER_FUNC_MODEM_MDM; - if (!strcasecmp("SERIAL", name)) - return USB_FSER_FUNC_SERIAL; - - return USB_FSER_FUNC_NONE; -} - static int gport_setup(struct usb_configuration *c) { int ret = 0; + int port_idx; + int i; - pr_debug("%s: no_tty_ports:%u no_sdio_ports: %u nr_ports:%u\n", - __func__, no_tty_ports, no_sdio_ports, nr_ports); + pr_debug("%s: no_tty_ports: %u no_sdio_ports: %u" + " no_smd_ports: %u no_hsic_sports: %u nr_ports: %u\n", + __func__, no_tty_ports, no_sdio_ports, no_smd_ports, + no_hsic_sports, nr_ports); if (no_tty_ports) ret = gserial_setup(c->cdev->gadget, no_tty_ports); @@ -326,33 +269,67 @@ static int gport_setup(struct usb_configuration *c) ret = gsdio_setup(c->cdev->gadget, no_sdio_ports); if (no_smd_ports) ret = gsmd_setup(c->cdev->gadget, no_smd_ports); + if (no_hsic_sports) { + port_idx = ghsic_data_setup(no_hsic_sports, USB_GADGET_SERIAL); + if (port_idx < 0) + return port_idx; + + for (i = 0; i < nr_ports; i++) { + if (gserial_ports[i].transport == + USB_GADGET_XPORT_HSIC) { + gserial_ports[i].client_port_num = port_idx; + port_idx++; + } + } + /*clinet port num is same for data setup and 
ctrl setup*/ + ret = ghsic_ctrl_setup(no_hsic_sports, USB_GADGET_SERIAL); + if (ret < 0) + return ret; + return 0; + } return ret; } static int gport_connect(struct f_gser *gser) { - unsigned port_num; + unsigned port_num; + int ret; - pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n", - __func__, transport_to_str(gser->transport), + pr_debug("%s: transport: %s f_gser: %p gserial: %p port_num: %d\n", + __func__, xport_to_str(gser->transport), gser, &gser->port, gser->port_num); port_num = gserial_ports[gser->port_num].client_port_num; switch (gser->transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_connect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gsdio_connect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gsmd_connect(&gser->port, port_num); break; + case USB_GADGET_XPORT_HSIC: + ret = ghsic_ctrl_connect(&gser->port, port_num); + if (ret) { + pr_err("%s: ghsic_ctrl_connect failed: err:%d\n", + __func__, ret); + return ret; + } + ret = ghsic_data_connect(&gser->port, port_num); + if (ret) { + pr_err("%s: ghsic_data_connect failed: err:%d\n", + __func__, ret); + ghsic_ctrl_disconnect(&gser->port, port_num); + return ret; + } + break; default: pr_err("%s: Un-supported transport: %s\n", __func__, - transport_to_str(gser->transport)); + xport_to_str(gser->transport)); return -ENODEV; } @@ -363,25 +340,29 @@ static int gport_disconnect(struct f_gser *gser) { unsigned port_num; - pr_debug("%s: transport:%s f_gser:%p gserial:%p port_num:%d\n", - __func__, transport_to_str(gser->transport), + pr_debug("%s: transport: %s f_gser: %p gserial: %p port_num: %d\n", + __func__, xport_to_str(gser->transport), gser, &gser->port, gser->port_num); port_num = gserial_ports[gser->port_num].client_port_num; switch (gser->transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_disconnect(&gser->port); break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gsdio_disconnect(&gser->port, port_num); break; - case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gsmd_disconnect(&gser->port, port_num); break; + case USB_GADGET_XPORT_HSIC: + ghsic_ctrl_disconnect(&gser->port, port_num); + ghsic_data_disconnect(&gser->port, port_num); + break; default: pr_err("%s: Un-supported transport:%s\n", __func__, - transport_to_str(gser->transport)); + xport_to_str(gser->transport)); return -ENODEV; } @@ -862,59 +843,16 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) { struct f_gser *gser; int status; - struct port_info *p = &gserial_ports[port_num]; - - if (p->func_type == USB_FSER_FUNC_NONE) { - pr_info("%s: non function port : %d\n", __func__, port_num); - return 0; - } - pr_info("%s: type:%d, trasport: %s\n", __func__, p->func_type, - transport_to_str(p->transport)); /* REVISIT might want instance-specific strings to help * distinguish instances ... 
*/ /* maybe allocate device-global string ID */ - /* HTC modem port_num is 0 */ -#if 0 - if (port_num != 0) { - if (gser_string_defs[0].id == 0) { - status = usb_string_id(c->cdev); - if (status < 0) - return status; - gser_string_defs[0].id = status; - } - } -#endif - - if (modem_string_defs[0].id == 0 && - p->func_type == USB_FSER_FUNC_MODEM) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - modem_string_defs[0].id = status; - } - - if (modem_string_defs[1].id == 0 && - p->func_type == USB_FSER_FUNC_MODEM_MDM) { - status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); - return status; - } - modem_string_defs[1].id = status; - } - - if (gser_string_defs[0].id == 0 && - p->func_type == USB_FSER_FUNC_SERIAL) { + if (gser_string_defs[0].id == 0) { status = usb_string_id(c->cdev); - if (status < 0) { - printk(KERN_ERR "%s: return %d\n", __func__, status); + if (status < 0) return status; - } gser_string_defs[0].id = status; } @@ -937,12 +875,10 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) gser->transport = gserial_ports[port_num].transport; #ifdef CONFIG_MODEM_SUPPORT /* We support only two ports for now */ - if (port_num == 0) { + if (port_num == 0) gser->port.func.name = "modem"; - } else + else gser->port.func.name = "nmea"; - - gser->port.func.setup = gser_setup; gser->port.connect = gser_connect; gser->port.get_dtr = gser_get_dtr; @@ -954,28 +890,6 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) gser->port.send_break = gser_send_break; #endif - switch (p->func_type) { - case USB_FSER_FUNC_MODEM: - gser->port.func.name = "modem"; - gser->port.func.strings = modem_strings; - gser_interface_desc.iInterface = modem_string_defs[0].id; - break; - case USB_FSER_FUNC_MODEM_MDM: - gser->port.func.name = "modem_mdm"; - gser->port.func.strings = modem_strings; - gser_interface_desc.iInterface = modem_string_defs[1].id; - break; - case USB_FSER_FUNC_SERIAL: - gser->port.func.name = "serial"; - gser->port.func.strings = gser_strings; - gser_interface_desc.iInterface = gser_string_defs[0].id; - break; - case USB_FSER_FUNC_NONE: - default : - break; - } - - status = usb_add_function(c, &gser->port.func); if (status) kfree(gser); @@ -985,39 +899,37 @@ int gser_bind_config(struct usb_configuration *c, u8 port_num) /** * gserial_init_port - bind a gserial_port to its transport */ -static int gserial_init_port(int port_num, const char *name, char *serial_type) +static int gserial_init_port(int port_num, const char *name) { enum transport_type transport; - enum fserial_func_type func_type; if (port_num >= GSERIAL_NO_PORTS) return -ENODEV; - transport = serial_str_to_transport(name); - func_type = serial_str_to_func_type(serial_type); - - pr_info("%s, port:%d, transport:%s, type:%d\n", __func__, - port_num, transport_to_str(transport), func_type); - - + transport = str_to_xport(name); + pr_debug("%s, port:%d, transport:%s\n", __func__, + port_num, xport_to_str(transport)); gserial_ports[port_num].transport = transport; - gserial_ports[port_num].func_type = func_type; gserial_ports[port_num].port_num = port_num; switch (transport) { - case USB_GADGET_FSERIAL_TRANSPORT_TTY: + case USB_GADGET_XPORT_TTY: gserial_ports[port_num].client_port_num = no_tty_ports; no_tty_ports++; break; - case USB_GADGET_FSERIAL_TRANSPORT_SDIO: + case USB_GADGET_XPORT_SDIO: gserial_ports[port_num].client_port_num = no_sdio_ports; no_sdio_ports++; break; - 
case USB_GADGET_FSERIAL_TRANSPORT_SMD: + case USB_GADGET_XPORT_SMD: gserial_ports[port_num].client_port_num = no_smd_ports; no_smd_ports++; break; + case USB_GADGET_XPORT_HSIC: + /*client port number will be updated in gport_setup*/ + no_hsic_sports++; + break; default: pr_err("%s: Un-supported transport transport: %u\n", __func__, gserial_ports[port_num].transport); diff --git a/drivers/usb/gadget/f_usbnet.c b/drivers/usb/gadget/f_usbnet.c deleted file mode 100644 index 1945b61c0de..00000000000 --- a/drivers/usb/gadget/f_usbnet.c +++ /dev/null @@ -1,840 +0,0 @@ -/* - * Gadget Driver for Motorola USBNet - * - * Copyright (C) 2009 Motorola, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* - * Macro Defines - */ - -#define EP0_BUFSIZE 256 -#define USBNET_FUNCTION_NAME "usbnet" - -/* Vendor Request to config IP */ -#define USBNET_SET_IP_ADDRESS 0x05 -#define USBNET_SET_SUBNET_MASK 0x06 -#define USBNET_SET_HOST_IP 0x07 - -/* Linux Network Interface */ -#define USB_MTU 1536 -#define MAX_BULK_TX_REQ_NUM 8 -#define MAX_BULK_RX_REQ_NUM 8 -#define MAX_INTR_RX_REQ_NUM 8 -#define STRING_INTERFACE 0 - -struct usbnet_context { - spinlock_t lock; /* For RX/TX list */ - struct net_device *dev; - - struct usb_gadget *gadget; - - struct usb_ep *bulk_in; - struct usb_ep *bulk_out; - struct usb_ep *intr_out; - u16 config; /* current USB config w_value */ - - struct list_head rx_reqs; - struct list_head tx_reqs; - - struct net_device_stats stats; - struct work_struct usbnet_config_wq; - u32 ip_addr; - u32 subnet_mask; - u32 router_ip; - u32 iff_flag; -}; - - -struct usbnet_device { - struct usb_function function; - struct usb_composite_dev *cdev; - struct usbnet_context *net_ctxt; -}; - -/* static strings, in UTF-8 */ -static struct usb_string usbnet_string_defs[] = { - [STRING_INTERFACE].s = "HTC Test Command", - { /* ZEROES END LIST */ }, -}; - -static struct usb_gadget_strings usbnet_string_table = { - .language = 0x0409, /* en-us */ - .strings = usbnet_string_defs, -}; - -static struct usb_gadget_strings *usbnet_strings[] = { - &usbnet_string_table, - NULL, -}; - - - -/* There is only one interface. 
*/ - -static struct usb_interface_descriptor usbnet_intf_desc = { - .bLength = sizeof usbnet_intf_desc, - .bDescriptorType = USB_DT_INTERFACE, - - .bNumEndpoints = 3, - .bInterfaceClass = 0x02, - .bInterfaceSubClass = 0x0a, - .bInterfaceProtocol = 0x01, -}; - - -static struct usb_endpoint_descriptor usbnet_fs_bulk_in_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_IN, - .bmAttributes = USB_ENDPOINT_XFER_BULK, -}; - -static struct usb_endpoint_descriptor usbnet_fs_bulk_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_BULK, -}; - -static struct usb_endpoint_descriptor fs_intr_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .bInterval = 1, -}; - -static struct usb_descriptor_header *fs_function[] = { - (struct usb_descriptor_header *) &usbnet_intf_desc, - (struct usb_descriptor_header *) &usbnet_fs_bulk_in_desc, - (struct usb_descriptor_header *) &usbnet_fs_bulk_out_desc, - (struct usb_descriptor_header *) &fs_intr_out_desc, - NULL, -}; - -static struct usb_endpoint_descriptor usbnet_hs_bulk_in_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_IN, - .bmAttributes = USB_ENDPOINT_XFER_BULK, - .wMaxPacketSize = __constant_cpu_to_le16(512), - .bInterval = 0, -}; - -static struct usb_endpoint_descriptor usbnet_hs_bulk_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_BULK, - .wMaxPacketSize = __constant_cpu_to_le16(512), - .bInterval = 0, -}; - -static struct usb_endpoint_descriptor hs_intr_out_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = USB_DIR_OUT, - .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = __constant_cpu_to_le16(64), - .bInterval = 1, -}; - -static struct usb_descriptor_header *hs_function[] = { - (struct usb_descriptor_header *) &usbnet_intf_desc, - (struct usb_descriptor_header *) &usbnet_hs_bulk_in_desc, - (struct usb_descriptor_header *) &usbnet_hs_bulk_out_desc, - (struct usb_descriptor_header *) &hs_intr_out_desc, - NULL, -}; - -#define DO_NOT_STOP_QUEUE 0 -#define STOP_QUEUE 1 - -#define USBNETDBG(context, fmt, args...) 
\ - do { \ - if (context && (context->gadget)) \ - dev_dbg(&(context->gadget->dev) , fmt , ## args); \ - } while (0) - -struct usbnet_device *_usbnet_dev; - -static inline struct usbnet_device *usbnet_func_to_dev(struct usb_function *f) -{ - return container_of(f, struct usbnet_device, function); -} - - -static int ether_queue_out(struct usb_request *req , - struct usbnet_context *context) -{ - unsigned long flags; - struct sk_buff *skb; - int ret; - - skb = alloc_skb(USB_MTU + NET_IP_ALIGN, GFP_ATOMIC); - if (!skb) { - USBNETDBG(context, "%s: failed to alloc skb\n", __func__); - ret = -ENOMEM; - goto fail; - } - - skb_reserve(skb, NET_IP_ALIGN); - - req->buf = skb->data; - req->length = USB_MTU; - req->context = skb; - - ret = usb_ep_queue(context->bulk_out, req, GFP_KERNEL); - if (ret == 0) - return 0; - else - kfree_skb(skb); -fail: - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - - return ret; -} - -struct usb_request *usb_get_recv_request(struct usbnet_context *context) -{ - unsigned long flags; - struct usb_request *req; - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->rx_reqs)) { - req = NULL; - } else { - req = list_first_entry(&context->rx_reqs, - struct usb_request, list); - list_del(&req->list); - } - spin_unlock_irqrestore(&context->lock, flags); - - return req; -} - -struct usb_request *usb_get_xmit_request(int stop_flag, struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - unsigned long flags; - struct usb_request *req; - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->tx_reqs)) { - req = NULL; - } else { - req = list_first_entry(&context->tx_reqs, - struct usb_request, list); - list_del(&req->list); - if (stop_flag == STOP_QUEUE && - list_empty(&context->tx_reqs)) - netif_stop_queue(dev); - } - spin_unlock_irqrestore(&context->lock, flags); - return req; -} - -static int usb_ether_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - struct usb_request *req; - unsigned long flags; - unsigned len; - int rc; - - req = usb_get_xmit_request(STOP_QUEUE, dev); - - if (!req) { - USBNETDBG(context, "%s: could not obtain tx request\n", - __func__); - return 1; - } - - /* Add 4 bytes CRC */ - skb->len += 4; - - /* ensure that we end with a short packet */ - len = skb->len; - if (!(len & 63) || !(len & 511)) - len++; - - req->context = skb; - req->buf = skb->data; - req->length = len; - - rc = usb_ep_queue(context->bulk_in, req, GFP_KERNEL); - if (rc != 0) { - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - - dev_kfree_skb_any(skb); - context->stats.tx_dropped++; - - USBNETDBG(context, - "%s: could not queue tx request\n", __func__); - } - - return 0; -} - -static void usb_ether_tx_timeout(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); -} - -static int usb_ether_open(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); - return 0; -} - -static int usb_ether_stop(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - USBNETDBG(context, "%s\n", __func__); - return 0; -} - -static struct net_device_stats *usb_ether_get_stats(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - 
USBNETDBG(context, "%s\n", __func__); - return &context->stats; -} - -static void usbnet_if_config(struct work_struct *work) -{ - struct ifreq ifr; - mm_segment_t saved_fs; - unsigned err; - struct sockaddr_in *sin; - struct usbnet_context *context = container_of(work, - struct usbnet_context, usbnet_config_wq); - - memset(&ifr, 0, sizeof(ifr)); - sin = (void *) &(ifr.ifr_ifru.ifru_addr); - strncpy(ifr.ifr_ifrn.ifrn_name, context->dev->name, - sizeof(ifr.ifr_ifrn.ifrn_name)); - sin->sin_family = AF_INET; - - sin->sin_addr.s_addr = context->ip_addr; - saved_fs = get_fs(); - set_fs(get_ds()); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFADDR, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFADDR\n", __func__); - - sin->sin_addr.s_addr = context->subnet_mask; - err = devinet_ioctl(dev_net(context->dev), SIOCSIFNETMASK, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFNETMASK\n", __func__); - - sin->sin_addr.s_addr = context->ip_addr | ~(context->subnet_mask); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFBRDADDR, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFBRDADDR\n", __func__); - - memset(&ifr, 0, sizeof(ifr)); - strncpy(ifr.ifr_ifrn.ifrn_name, context->dev->name, - sizeof(ifr.ifr_ifrn.ifrn_name)); - ifr.ifr_flags = ((context->dev->flags) | context->iff_flag); - err = devinet_ioctl(dev_net(context->dev), SIOCSIFFLAGS, &ifr); - if (err) - USBNETDBG(context, "%s: Error in SIOCSIFFLAGS\n", __func__); - - set_fs(saved_fs); -} - -static const struct net_device_ops usb_netdev_ops = { - .ndo_open = usb_ether_open, - .ndo_stop = usb_ether_stop, - .ndo_start_xmit = usb_ether_xmit, - .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = usb_ether_tx_timeout, - .ndo_get_stats = usb_ether_get_stats, -}; - -static void usb_ether_setup(struct net_device *dev) -{ - struct usbnet_context *context = netdev_priv(dev); - INIT_LIST_HEAD(&context->rx_reqs); - INIT_LIST_HEAD(&context->tx_reqs); - - spin_lock_init(&context->lock); - context->dev = dev; - dev->netdev_ops = &usb_netdev_ops; - ether_setup(dev); - - random_ether_addr(dev->dev_addr); -} - -/*-------------------------------------------------------------------------*/ -static void usbnet_cleanup(struct usbnet_device *dev) -{ - struct usbnet_context *context = dev->net_ctxt; - if (context) { - unregister_netdev(context->dev); - free_netdev(context->dev); - dev->net_ctxt = NULL; - } -} - -static void usbnet_unbind(struct usb_configuration *c, struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usb_composite_dev *cdev = c->cdev; - struct usbnet_context *context = dev->net_ctxt; - struct usb_request *req; - - dev->cdev = cdev; - - usb_ep_disable(context->bulk_in); - usb_ep_disable(context->bulk_out); - - /* Free BULK OUT Requests */ - while ((req = usb_get_recv_request(context))) - usb_ep_free_request(context->bulk_out, req); - - /* Free BULK IN Requests */ - while ((req = usb_get_xmit_request(DO_NOT_STOP_QUEUE, - context->dev))) { - usb_ep_free_request(context->bulk_in, req); - } - - context->config = 0; - - usbnet_cleanup(dev); -} - -static void ether_out_complete(struct usb_ep *ep, struct usb_request *req) -{ - struct sk_buff *skb = req->context; - struct usbnet_context *context = ep->driver_data; - - if (req->status == 0) { - dmac_inv_range((void *)req->buf, (void *)(req->buf + - req->actual)); - skb_put(skb, req->actual); - skb->protocol = eth_type_trans(skb, context->dev); - context->stats.rx_packets++; - context->stats.rx_bytes += req->actual; - 
netif_rx(skb); - } else { - dev_kfree_skb_any(skb); - context->stats.rx_errors++; - } - - /* don't bother requeuing if we just went offline */ - if ((req->status == -ENODEV) || (req->status == -ESHUTDOWN)) { - unsigned long flags; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } else { - if (ether_queue_out(req, context)) - USBNETDBG(context, "ether_out: cannot requeue\n"); - } -} - -static void ether_in_complete(struct usb_ep *ep, struct usb_request *req) -{ - unsigned long flags; - struct sk_buff *skb = req->context; - struct usbnet_context *context = ep->driver_data; - - if (req->status == 0) { - context->stats.tx_packets++; - context->stats.tx_bytes += req->actual; - } else { - context->stats.tx_errors++; - } - - dev_kfree_skb_any(skb); - - spin_lock_irqsave(&context->lock, flags); - if (list_empty(&context->tx_reqs)) - netif_start_queue(context->dev); - - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); -} - -static int usbnet_bind(struct usb_configuration *c, - struct usb_function *f) -{ - struct usb_composite_dev *cdev = c->cdev; - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - int n, rc, id; - struct usb_ep *ep; - struct usb_request *req; - unsigned long flags; - - dev->cdev = cdev; - - id = usb_interface_id(c, f); - if (id < 0) - return id; - usbnet_intf_desc.bInterfaceNumber = id; - context->gadget = cdev->gadget; - - /* Find all the endpoints we will use */ - ep = usb_ep_autoconfig(cdev->gadget, &usbnet_fs_bulk_in_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure usbnet_hs_bulk_in_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->bulk_in = ep; - - ep = usb_ep_autoconfig(cdev->gadget, &usbnet_fs_bulk_out_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure usbnet_hs_bulk_out_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->bulk_out = ep; - - - ep = usb_ep_autoconfig(cdev->gadget, &fs_intr_out_desc); - if (!ep) { - USBNETDBG(context, "%s auto-configure hs_intr_out_desc error\n", - __func__); - goto autoconf_fail; - } - ep->driver_data = context; - context->intr_out = ep; - - if (gadget_is_dualspeed(cdev->gadget)) { - - /* Assume endpoint addresses are the same for both speeds */ - usbnet_hs_bulk_in_desc.bEndpointAddress = - usbnet_fs_bulk_in_desc.bEndpointAddress; - usbnet_hs_bulk_out_desc.bEndpointAddress = - usbnet_fs_bulk_out_desc.bEndpointAddress; - hs_intr_out_desc.bEndpointAddress = - fs_intr_out_desc.bEndpointAddress; - } - - - rc = -ENOMEM; - - for (n = 0; n < MAX_BULK_RX_REQ_NUM; n++) { - req = usb_ep_alloc_request(context->bulk_out, - GFP_KERNEL); - if (!req) { - USBNETDBG(context, "%s: alloc request bulk_out fail\n", - __func__); - break; - } - req->complete = ether_out_complete; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->rx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } - for (n = 0; n < MAX_BULK_TX_REQ_NUM; n++) { - req = usb_ep_alloc_request(context->bulk_in, - GFP_KERNEL); - if (!req) { - USBNETDBG(context, "%s: alloc request bulk_in fail\n", - __func__); - break; - } - req->complete = ether_in_complete; - spin_lock_irqsave(&context->lock, flags); - list_add_tail(&req->list, &context->tx_reqs); - spin_unlock_irqrestore(&context->lock, flags); - } - - return 0; - -autoconf_fail: - rc = -ENOTSUPP; - usbnet_unbind(c, 
f); - return rc; -} - - - - -static void do_set_config(struct usb_function *f, u16 new_config) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - int result = 0; - struct usb_request *req; - int high_speed_flag = 0; - - if (context->config == new_config) /* Config did not change */ - return; - - context->config = new_config; - - if (new_config == 1) { /* Enable End points */ - if (gadget_is_dualspeed(context->gadget) - && context->gadget->speed == USB_SPEED_HIGH) - high_speed_flag = 1; - - if (high_speed_flag) - result = usb_ep_enable(context->bulk_in, - &usbnet_hs_bulk_in_desc); - else - result = usb_ep_enable(context->bulk_in, - &usbnet_fs_bulk_in_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable BULK_IN EP ret=%d\n", - __func__, result); - } - - context->bulk_in->driver_data = context; - - if (high_speed_flag) - result = usb_ep_enable(context->bulk_out, - &usbnet_hs_bulk_out_desc); - else - result = usb_ep_enable(context->bulk_out, - &usbnet_fs_bulk_out_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable BULK_OUT EP ret = %d\n", - __func__, result); - } - - context->bulk_out->driver_data = context; - - if (high_speed_flag) - result = usb_ep_enable(context->intr_out, - &hs_intr_out_desc); - else - result = usb_ep_enable(context->intr_out, - &fs_intr_out_desc); - - if (result != 0) { - USBNETDBG(context, - "%s: failed to enable INTR_OUT EP ret = %d\n", - __func__, result); - } - - context->intr_out->driver_data = context; - - /* we're online -- get all rx requests queued */ - while ((req = usb_get_recv_request(context))) { - if (ether_queue_out(req, context)) { - USBNETDBG(context, - "%s: ether_queue_out failed\n", - __func__); - break; - } - } - - } else {/* Disable Endpoints */ - if (context->bulk_in) - usb_ep_disable(context->bulk_in); - if (context->bulk_out) - usb_ep_disable(context->bulk_out); - } -} - - -static int usbnet_set_alt(struct usb_function *f, - unsigned intf, unsigned alt) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "usbnet_set_alt intf: %d alt: %d\n", intf, alt); - do_set_config(f, 1); - return 0; -} - -static int usbnet_ctrlrequest(struct usb_composite_dev *cdev, - const struct usb_ctrlrequest *ctrl) -{ - struct usbnet_device *dev = _usbnet_dev; - struct usbnet_context *context = dev->net_ctxt; - int rc = -EOPNOTSUPP; - int wIndex = le16_to_cpu(ctrl->wIndex); - int wValue = le16_to_cpu(ctrl->wValue); - int wLength = le16_to_cpu(ctrl->wLength); - struct usb_request *req = cdev->req; - - USBNETDBG(context, "usbnet_ctrlrequest " - "%02x.%02x v%04x i%04x l%u\n", - ctrl->bRequestType, ctrl->bRequest, - wValue, wIndex, wLength); - - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { - switch (ctrl->bRequest) { - case USBNET_SET_IP_ADDRESS: - context->ip_addr = (wValue << 16) | wIndex; - rc = 0; - break; - case USBNET_SET_SUBNET_MASK: - context->subnet_mask = (wValue << 16) | wIndex; - rc = 0; - break; - case USBNET_SET_HOST_IP: - context->router_ip = (wValue << 16) | wIndex; - rc = 0; - break; - default: - break; - } - - if (context->ip_addr && context->subnet_mask - && context->router_ip) { - context->iff_flag = IFF_UP; - /* schedule a work queue to do this because we - need to be able to sleep */ - schedule_work(&context->usbnet_config_wq); - } - } - - /* respond with data transfer or status phase? 
*/ - if (rc >= 0) { - req->zero = rc < wLength; - req->length = rc; - rc = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); - if (rc < 0) - USBNETDBG(context, "usbnet setup response error\n"); - } - return rc; -} - -static void usbnet_disable(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); - do_set_config(f, 0); -} - -static void usbnet_suspend(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); -} - -static void usbnet_resume(struct usb_function *f) -{ - struct usbnet_device *dev = usbnet_func_to_dev(f); - struct usbnet_context *context = dev->net_ctxt; - USBNETDBG(context, "%s\n", __func__); -} - -static int usbnet_bind_config(struct usb_configuration *c) -{ - struct usbnet_device *dev = _usbnet_dev; - struct usbnet_context *context; - struct net_device *net_dev; - int ret, status; - - net_dev = alloc_netdev(sizeof(struct usbnet_context), - "usb%d", usb_ether_setup); - if (!net_dev) { - pr_err("%s: alloc_netdev error\n", __func__); - return -EINVAL; - } - net_dev->netdev_ops = &usb_netdev_ops; - - ret = register_netdev(net_dev); - if (ret) { - pr_err("%s: register_netdev error\n", __func__); - free_netdev(net_dev); - return -EINVAL; - } - context = netdev_priv(net_dev); - INIT_WORK(&context->usbnet_config_wq, usbnet_if_config); - - status = usb_string_id(c->cdev); - if (status >= 0) { - usbnet_string_defs[STRING_INTERFACE].id = status; - usbnet_intf_desc.iInterface = status; - } - - context->config = 0; - dev->net_ctxt = context; - dev->cdev = c->cdev; - dev->function.name = USBNET_FUNCTION_NAME; - dev->function.descriptors = fs_function; - dev->function.hs_descriptors = hs_function; - dev->function.bind = usbnet_bind; - dev->function.unbind = usbnet_unbind; - dev->function.set_alt = usbnet_set_alt; - dev->function.disable = usbnet_disable; - dev->function.suspend = usbnet_suspend; - dev->function.resume = usbnet_resume; - dev->function.strings = usbnet_strings; - - ret = usb_add_function(c, &dev->function); - if (ret) - goto err1; - - pr_info("%s\n", __func__); - - return 0; - -err1: - kfree(dev); - pr_err("usbnet gadget driver failed to initialize\n"); - usbnet_cleanup(dev); - return ret; -} - -static int usbnet_setup(void) -{ - struct usbnet_device *dev = _usbnet_dev; - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - _usbnet_dev = dev; - - return 0; -} diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 0360f56221e..e358130a485 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -2553,7 +2553,7 @@ static int do_scsi_command(struct fsg_dev *fsg) fsg->data_size_from_cmnd = 0; sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); if ((reply = check_command(fsg, fsg->cmnd_size, - DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { + DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) { fsg->curlun->sense_data = SS_INVALID_COMMAND; reply = -EINVAL; } diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 4e483316808..44d789d27cf 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -717,6 +717,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); lastreq->tail->next_td_ptr = cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK); + /* 
Ensure dTD's next dtd pointer to be updated */ + wmb(); /* Read prime bit, if 1 goto done */ if (fsl_readl(&dr_regs->endpointprime) & bitmask) goto out; @@ -767,7 +769,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) * @is_last: return flag if it is the last dTD of the request * return: pointer to the built dTD */ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, - dma_addr_t *dma, int *is_last) + dma_addr_t *dma, int *is_last, gfp_t gfp_flags) { u32 swap_temp; struct ep_td_struct *dtd; @@ -776,7 +778,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, *length = min(req->req.length - req->req.actual, (unsigned)EP_MAX_LENGTH_TRANSFER); - dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma); + dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma); if (dtd == NULL) return dtd; @@ -826,7 +828,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, } /* Generate dtd chain for a request */ -static int fsl_req_to_dtd(struct fsl_req *req) +static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags) { unsigned count; int is_last; @@ -835,7 +837,7 @@ static int fsl_req_to_dtd(struct fsl_req *req) dma_addr_t dma; do { - dtd = fsl_build_dtd(req, &count, &dma, &is_last); + dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags); if (dtd == NULL) return -ENOMEM; @@ -909,13 +911,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) req->req.actual = 0; req->dtd_count = 0; - spin_lock_irqsave(&udc->lock, flags); - /* build dtds and push them to device queue */ - if (!fsl_req_to_dtd(req)) { + if (!fsl_req_to_dtd(req, gfp_flags)) { + spin_lock_irqsave(&udc->lock, flags); fsl_queue_td(ep, req); } else { - spin_unlock_irqrestore(&udc->lock, flags); return -ENOMEM; } @@ -1294,7 +1294,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction) ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->mapped = 1; - if (fsl_req_to_dtd(req) == 0) + if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0) fsl_queue_td(ep, req); else return -ENOMEM; @@ -1378,7 +1378,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value, req->mapped = 1; /* prime the data phase */ - if ((fsl_req_to_dtd(req) == 0)) + if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0)) fsl_queue_td(ep, req); else /* no mem */ goto stall; diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c index 2523e54097b..79afa8256cb 100644 --- a/drivers/usb/gadget/hid.c +++ b/drivers/usb/gadget/hid.c @@ -69,9 +69,9 @@ static struct usb_device_descriptor device_desc = { /* .bDeviceClass = USB_CLASS_COMM, */ /* .bDeviceSubClass = 0, */ /* .bDeviceProtocol = 0, */ - .bDeviceClass = 0xEF, - .bDeviceSubClass = 2, - .bDeviceProtocol = 1, + .bDeviceClass = USB_CLASS_PER_INTERFACE, + .bDeviceSubClass = 0, + .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id can be overridden by module parameters. 
*/ diff --git a/drivers/usb/gadget/htc_attr.c b/drivers/usb/gadget/htc_attr.c index 4247ab53e31..072564f96e2 100644 --- a/drivers/usb/gadget/htc_attr.c +++ b/drivers/usb/gadget/htc_attr.c @@ -12,647 +12,27 @@ * */ -#include -#include - -enum { - USB_FUNCTION_UMS = 0, - USB_FUNCTION_ADB = 1, - USB_FUNCTION_RNDIS, - USB_FUNCTION_DIAG, - USB_FUNCTION_SERIAL, - USB_FUNCTION_PROJECTOR, - USB_FUNCTION_FSYNC, - USB_FUNCTION_MTP, - USB_FUNCTION_MODEM, /* 8 */ - USB_FUNCTION_ECM, - USB_FUNCTION_ACM, - USB_FUNCTION_DIAG_MDM, /* 11 */ - USB_FUNCTION_RMNET, - USB_FUNCTION_ACCESSORY, - USB_FUNCTION_MODEM_MDM, /* 14 */ - USB_FUNCTION_MTP36, - USB_FUNCTION_USBNET, - USB_FUNCTION_RNDIS_IPT = 31, -}; - -struct usb_string_node{ - u32 usb_function_flag; - char *name; -}; - -static struct usb_string_node usb_string_array[] = { - { - .usb_function_flag = 1 << USB_FUNCTION_UMS, - .name = "mass_storage", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ADB, - .name = "adb", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_RNDIS, - .name = "rndis", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_DIAG, - .name = "diag", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_SERIAL, - .name = "serial", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_PROJECTOR, - .name = "projector", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MODEM, - .name = "modem", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ECM, - .name = "cdc_ethernet", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ACM, - .name = "acm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_DIAG_MDM, - .name = "diag_mdm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_RMNET, - .name = "rmnet", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_ACCESSORY, - .name = "accessory", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MODEM_MDM, - .name = "modem_mdm", - }, - { - .usb_function_flag = 1 << USB_FUNCTION_MTP, - .name = "mtp", - }, - -}; - -static int use_mfg_serialno; -static char mfg_df_serialno[16]; -static int intrsharing; - -#define PID_RNDIS 0x0ffe -#define PID_ECM 0x0ff8 -#define PID_ACM 0x0ff4 -#define PID_USBNET 0x0fcd - -/* for htc in-house device attribute, htc_usb_attr.c */ -void android_force_reset(void) -{ - if (_android_dev && _android_dev->cdev) - usb_composite_force_reset(_android_dev->cdev); -} - -static bool isFunctionDisabled(struct android_usb_function *function) -{ - struct android_usb_function *f; - struct list_head *list = &_android_dev->enabled_functions; - - list_for_each_entry(f, list, enabled_list) { - if (!strcmp(function->name, f->name)) - return false; - } - return true; -} - -static int product_has_function(struct android_usb_product *p, - struct android_usb_function *f) -{ - char **functions = p->functions; - int count = p->num_functions; - const char *name = f->name; - int i; - - for (i = 0; i < count; i++) { - if (!strncmp(name, functions[i], strlen(name))) - return 1; - } - return 0; -} - -static int product_matches_functions(struct android_usb_product *p, - struct list_head *list) -{ - int count = 0; - struct android_usb_function *f; - list_for_each_entry(f, list, enabled_list) { - count++; - if (product_has_function(p, f) == isFunctionDisabled(f)) - return 0; - } - - if (count == p->num_functions) - return 1; - else - return 0; -} - -static int get_product_id(struct android_dev *dev, struct list_head *list) -{ - struct android_usb_product *p = dev->products; - int count = dev->num_products; - int i; - - if (p) { - for (i = 0; i < count; i++, p++) { - if (product_matches_functions(p, list)) - return 
p->product_id; - } - } - /* use default product ID */ - return dev->pdata->product_id; -} - -static struct android_usb_product *get_product(struct android_dev *dev, struct list_head *list) -{ - struct android_usb_product *p = dev->products; - int count = dev->num_products; - int i; - - if (p) { - for (i = 0; i < count; i++, p++) { - if (product_matches_functions(p, list)) - return p; - } - } - return NULL; -} - - -static unsigned int htc_usb_get_func_combine_value(void) -{ - struct android_dev *dev = _android_dev; - struct android_usb_function *f; - int i; - unsigned int val = 0; - - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - for (i = 0; i < ARRAY_SIZE(usb_string_array); i++) - if (!strcmp(usb_string_array[i].name, f->name)) { - val |= usb_string_array[i].usb_function_flag; - break; - } - } - return val; -} -static DEFINE_MUTEX(function_bind_sem); -int htc_usb_enable_function(char *name, int ebl) -{ - int i; - unsigned val; - - mutex_lock(&function_bind_sem); - - val = htc_usb_get_func_combine_value(); - - for (i = 0; i < ARRAY_SIZE(usb_string_array); i++) { - if (!strcmp(usb_string_array[i].name, name)) { - if (ebl) { - if (val & usb_string_array[i].usb_function_flag) { - pr_info("%s: '%s' is already enabled\n", __func__, name); - mutex_unlock(&function_bind_sem); - return 0; - } - val |= usb_string_array[i].usb_function_flag; - } else { - if (!(val & usb_string_array[i].usb_function_flag)) { - pr_info("%s: '%s' is already disabled\n", __func__, name); - mutex_unlock(&function_bind_sem); - return 0; - } - - val &= ~usb_string_array[i].usb_function_flag; - } - break; - } - } - mutex_unlock(&function_bind_sem); - return android_switch_function(val); -} - - -int android_show_function(char *buf) -{ - unsigned length = 0; - struct android_dev *dev = _android_dev; - struct android_usb_function *f; - char *ebl_str[2] = {"disable", "enable"}; - char *p; - int i; - - for (i = 0; dev->functions[i] != NULL; i++) { - - p = ebl_str[0]; - list_for_each_entry(f, &dev->enabled_functions, enabled_list) { - if (!strcmp(dev->functions[i]->name, f->name)) { - p = ebl_str[1]; - break; - } - } - - length += sprintf(buf + length, "%s:%s\n", - dev->functions[i]->name, p); - - } - return length; -} - - -int android_switch_function(unsigned func) -{ - struct android_dev *dev = _android_dev; - struct android_usb_function **functions = dev->functions; - struct android_usb_function *f; - struct android_usb_product *product; - int product_id, vendor_id; - unsigned val; - - /* framework may try to enable adb before android_usb_init_work is done.*/ - if (dev->enabled != true) { - pr_info("%s: USB driver is not initialize\n", __func__); - return 0; - } - - mutex_lock(&function_bind_sem); - - val = htc_usb_get_func_combine_value(); - - pr_info("%s: %u, before %u\n", __func__, func, val); - - if (func == val) { - pr_info("%s: SKIP due the function is the same ,%u\n" - , __func__, func); - mutex_unlock(&function_bind_sem); - return 0; - } - - usb_gadget_disconnect(dev->cdev->gadget); - usb_remove_config(dev->cdev, &android_config_driver); - - INIT_LIST_HEAD(&dev->enabled_functions); - - while ((f = *functions++)) { - if ((func & (1 << USB_FUNCTION_UMS)) && - !strcmp(f->name, "mass_storage")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ADB)) && - !strcmp(f->name, "adb")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ECM)) && - !strcmp(f->name, "cdc_ethernet")) - list_add_tail(&f->enabled_list, 
&dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ACM)) && - !strcmp(f->name, "acm")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_RNDIS)) && - !strcmp(f->name, "rndis")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); - intrsharing = !((func >> USB_FUNCTION_RNDIS_IPT) & 1); - } else if ((func & (1 << USB_FUNCTION_DIAG)) && - !strcmp(f->name, "diag")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG - func |= 1 << USB_FUNCTION_DIAG_MDM; -#endif - } else if ((func & (1 << USB_FUNCTION_MODEM)) && - !strcmp(f->name, "modem")) { - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - func |= 1 << USB_FUNCTION_MODEM_MDM; -#endif - } else if ((func & (1 << USB_FUNCTION_SERIAL)) && - !strcmp(f->name, "serial")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_MTP)) && - !strcmp(f->name, "mtp")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_ACCESSORY)) && - !strcmp(f->name, "accessory")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else if ((func & (1 << USB_FUNCTION_PROJECTOR)) && - !strcmp(f->name, "projector")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_DIAG - else if ((func & (1 << USB_FUNCTION_DIAG_MDM)) && - !strcmp(f->name, "diag_mdm")) { - if (func & (1 << USB_FUNCTION_DIAG)) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else - func &= ~(1 << USB_FUNCTION_DIAG_MDM); - } -#endif - else if ((func & (1 << USB_FUNCTION_RMNET)) && - !strcmp(f->name, "rmnet")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#ifdef CONFIG_USB_ANDROID_MDM9K_MODEM - else if ((func & (1 << USB_FUNCTION_MODEM_MDM)) && - !strcmp(f->name, "modem_mdm")) { - if (func & (1 << USB_FUNCTION_MODEM)) - list_add_tail(&f->enabled_list, &dev->enabled_functions); - else - func &= ~(1 << USB_FUNCTION_MODEM_MDM); - } -#endif -#ifdef CONFIG_USB_ANDROID_USBNET - else if ((func & (1 << USB_FUNCTION_USBNET)) && - !strcmp(f->name, "usbnet")) - list_add_tail(&f->enabled_list, &dev->enabled_functions); -#endif - } - - list_for_each_entry(f, &dev->enabled_functions, enabled_list) - pr_debug("# %s\n", f->name); - - product = get_product(dev, &dev->enabled_functions); - - if (product) { - vendor_id = product->vendor_id ? product->vendor_id : dev->pdata->vendor_id; - product_id = product->product_id; - } else { - vendor_id = dev->pdata->vendor_id; - product_id = dev->pdata->product_id; - } - - /* We need to specify the COMM class in the device descriptor - * if we are using RNDIS. 
- */ - if (product_id == PID_RNDIS || product_id == PID_ECM - || product_id == PID_ACM || product_id == PID_USBNET) - dev->cdev->desc.bDeviceClass = USB_CLASS_COMM; - else - dev->cdev->desc.bDeviceClass = USB_CLASS_PER_INTERFACE; - - if (dev->match) - product_id = dev->match(product_id, intrsharing); - - pr_info("%s: vendor_id=0x%x, product_id=0x%x\n", - __func__, vendor_id, product_id); - - device_desc.idVendor = __constant_cpu_to_le16(vendor_id); - device_desc.idProduct = __constant_cpu_to_le16(product_id); - - dev->cdev->desc.idVendor = device_desc.idVendor; - dev->cdev->desc.idProduct = device_desc.idProduct; - - device_desc.bDeviceClass = dev->cdev->desc.bDeviceClass; - - usb_add_config(dev->cdev, &android_config_driver, android_bind_config); - - mdelay(100); - usb_gadget_connect(dev->cdev->gadget); - dev->enabled = true; - - mutex_unlock(&function_bind_sem); - return 0; -} - -void android_set_serialno(char *serialno) -{ - strings_dev[STRING_SERIAL_IDX].s = serialno; -} - -void init_mfg_serialno(void) -{ - char *serialno = "000000000000"; - - use_mfg_serialno = (board_mfg_mode() == 1) ? 1 : 0; - strncpy(mfg_df_serialno, serialno, strlen(serialno)); -} - -static ssize_t show_usb_cable_connect(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - - length = sprintf(buf, "%d", - (usb_get_connect_type() == CONNECT_TYPE_USB)?1:0); - return length; -} static ssize_t show_usb_function_switch(struct device *dev, struct device_attribute *attr, char *buf) { - return android_show_function(buf); + return 0; } static ssize_t store_usb_function_switch(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - unsigned u; - ssize_t ret; - - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) { - USB_ERR("%s: %d\n", __func__, ret); - return 0; - } - - ret = android_switch_function(u); - - if (ret == 0) - return count; - else - return 0; -} - -static ssize_t show_USB_ID_status(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct android_usb_platform_data *pdata = dev->platform_data; - int value = 1; - unsigned length; - printk(KERN_INFO "[USB] id pin: %d\n", pdata->usb_id_pin_gpio); - - if (pdata->usb_id_pin_gpio != 0) { - value = gpio_get_value(pdata->usb_id_pin_gpio); - printk(KERN_INFO"[USB] id pin status %d\n", value); - } - - length = sprintf(buf, "%d", value); - return length; -} - -static ssize_t show_usb_serial_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - struct android_usb_platform_data *pdata = dev->platform_data; - - length = sprintf(buf, "%s", pdata->serial_number); - return length; -} - -static ssize_t store_usb_serial_number(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct android_usb_platform_data *pdata = dev->platform_data; - char *serialno = "000000000000"; - - if (buf[0] == '0' || buf[0] == '1') { - memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); - if (buf[0] == '0') { - strncpy(mfg_df_serialno, serialno, strlen(serialno)); - use_mfg_serialno = 1; - android_set_serialno(mfg_df_serialno); - } else { - strncpy(mfg_df_serialno, pdata->serial_number, - strlen(pdata->serial_number)); - use_mfg_serialno = 0; - android_set_serialno(pdata->serial_number); - } - /* reset_device */ - android_force_reset(); - } - - return count; -} - -static ssize_t show_dummy_usb_serial_number(struct device *dev, - struct device_attribute *attr, char *buf) -{ - unsigned length; - struct 
android_usb_platform_data *pdata = dev->platform_data; - - if (use_mfg_serialno) - length = sprintf(buf, "%s", mfg_df_serialno); /* dummy */ - else - length = sprintf(buf, "%s", pdata->serial_number); /* Real */ - return length; -} - -static ssize_t store_dummy_usb_serial_number(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - int data_buff_size = (sizeof(mfg_df_serialno) > strlen(buf))? - strlen(buf):sizeof(mfg_df_serialno); - int loop_i; - - /* avoid overflow, mfg_df_serialno[16] always is 0x0 */ - if (data_buff_size == 16) - data_buff_size--; - - for (loop_i = 0; loop_i < data_buff_size; loop_i++) { - if (buf[loop_i] >= 0x30 && buf[loop_i] <= 0x39) /* 0-9 */ - continue; - else if (buf[loop_i] >= 0x41 && buf[loop_i] <= 0x5A) /* A-Z */ - continue; - if (buf[loop_i] == 0x0A) /* Line Feed */ - continue; - else { - printk(KERN_INFO "%s(): get invaild char (0x%2.2X)\n", - __func__, buf[loop_i]); - return -EINVAL; - } - } - - use_mfg_serialno = 1; - memset(mfg_df_serialno, 0x0, sizeof(mfg_df_serialno)); - strncpy(mfg_df_serialno, buf, data_buff_size); - android_set_serialno(mfg_df_serialno); - /*device_reset */ - android_force_reset(); - - return count; -} - -static ssize_t -show_usb_car_kit_enable(struct device *dev, struct device_attribute *attr, - char *buf) -{ - unsigned length; - int value = 0; -#ifdef CONFIG_CABLE_DETECT_ACCESSORY -#include - value = (cable_get_accessory_type() == DOCK_STATE_UNDOCKED) ? 0 : 1; - printk(KERN_INFO "USB_car_kit_enable %d\n", cable_get_accessory_type()); -#else - value = 0; - printk(KERN_INFO "USB_car_kit_enable: CABLE_DETECT_ACCESSORY was not defined\n"); -#endif - - length = sprintf(buf, "%d", value); - return length; -} - -static ssize_t show_usb_phy_setting(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return otg_show_usb_phy_setting(buf); -} -static ssize_t store_usb_phy_setting(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - return otg_store_usb_phy_setting(buf, count); -} - -#if (defined(CONFIG_USB_OTG) && defined(CONFIG_USB_OTG_HOST)) -void msm_otg_set_id_state(int id); -static ssize_t store_usb_host_mode(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - unsigned u, enable; - ssize_t ret; - - ret = strict_strtoul(buf, 10, (unsigned long *)&u); - if (ret < 0) { - USB_ERR("%s: %d\n", __func__, ret); - return 0; - } - - enable = u ? 1 : 0; - msm_otg_set_id_state(!enable); - - USB_INFO("%s USB host\n", enable ? 
"Enable" : "Disable"); - - return count; + return 0; } -static DEVICE_ATTR(host_mode, 0220, - NULL, store_usb_host_mode); -#endif -static DEVICE_ATTR(usb_cable_connect, 0444, show_usb_cable_connect, NULL); static DEVICE_ATTR(usb_function_switch, 0664, show_usb_function_switch, store_usb_function_switch); -static DEVICE_ATTR(USB_ID_status, 0444, show_USB_ID_status, NULL); -static DEVICE_ATTR(usb_serial_number, 0644, - show_usb_serial_number, store_usb_serial_number); -static DEVICE_ATTR(dummy_usb_serial_number, 0644, - show_dummy_usb_serial_number, store_dummy_usb_serial_number); -static DEVICE_ATTR(usb_car_kit_enable, 0444, show_usb_car_kit_enable, NULL); -static DEVICE_ATTR(usb_phy_setting, 0664, - show_usb_phy_setting, store_usb_phy_setting); static struct attribute *android_htc_usb_attributes[] = { - &dev_attr_usb_cable_connect.attr, &dev_attr_usb_function_switch.attr, - &dev_attr_USB_ID_status.attr, /* for MFG */ - &dev_attr_usb_serial_number.attr, /* for MFG */ - &dev_attr_dummy_usb_serial_number.attr, /* for MFG */ - &dev_attr_usb_car_kit_enable.attr, - &dev_attr_usb_phy_setting.attr, -#if (defined(CONFIG_USB_OTG) && defined(CONFIG_USB_OTG_HOST)) - &dev_attr_host_mode.attr, -#endif - NULL + NULL, }; -static const struct attribute_group android_usb_attr_group = { +static const struct attribute_group htc_attr_group = { .attrs = android_htc_usb_attributes, }; - diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index a56876aaf76..febadaa2a80 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -1050,6 +1050,8 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) // FIXME don't call this with the spinlock held ... if (copy_to_user (buf, dev->req->buf, len)) retval = -EFAULT; + else + retval = len; clean_req (dev->gadget->ep0, dev->req); /* NOTE userspace can't yet choose to stall */ } diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c new file mode 100644 index 00000000000..43b1c544d6c --- /dev/null +++ b/drivers/usb/gadget/msm72k_udc.c @@ -0,0 +1,2793 @@ +/* + * Driver for HighSpeed USB Client Controller in MSM7K + * + * Copyright (C) 2008 Google, Inc. + * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved. + * Author: Mike Lockwood + * Brian Swetland + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +static const char driver_name[] = "msm72k_udc"; + +/* #define DEBUG */ +/* #define VERBOSE */ + +#define MSM_USB_BASE ((unsigned) ui->addr) + +#define DRIVER_DESC "MSM 72K USB Peripheral Controller" +#define DRIVER_NAME "MSM72K_UDC" + +#define EPT_FLAG_IN 0x0001 + +#define SETUP_BUF_SIZE 8 + + +static const char *const ep_name[] = { + "ep0out", "ep1out", "ep2out", "ep3out", + "ep4out", "ep5out", "ep6out", "ep7out", + "ep8out", "ep9out", "ep10out", "ep11out", + "ep12out", "ep13out", "ep14out", "ep15out", + "ep0in", "ep1in", "ep2in", "ep3in", + "ep4in", "ep5in", "ep6in", "ep7in", + "ep8in", "ep9in", "ep10in", "ep11in", + "ep12in", "ep13in", "ep14in", "ep15in" +}; + +/*To release the wakelock from debugfs*/ +static int release_wlocks; + +struct msm_request { + struct usb_request req; + + /* saved copy of req.complete */ + void (*gadget_complete)(struct usb_ep *ep, + struct usb_request *req); + + + struct usb_info *ui; + struct msm_request *next; + struct msm_request *prev; + + unsigned busy:1; + unsigned live:1; + unsigned alloced:1; + + dma_addr_t dma; + dma_addr_t item_dma; + + struct ept_queue_item *item; +}; + +#define to_msm_request(r) container_of(r, struct msm_request, req) +#define to_msm_endpoint(r) container_of(r, struct msm_endpoint, ep) +#define to_msm_otg(xceiv) container_of(xceiv, struct msm_otg, otg) +#define is_b_sess_vld() ((OTGSC_BSV & readl(USB_OTGSC)) ? 1 : 0) +#define is_usb_online(ui) (ui->usb_state != USB_STATE_NOTATTACHED) + +struct msm_endpoint { + struct usb_ep ep; + struct usb_info *ui; + struct msm_request *req; /* head of pending requests */ + struct msm_request *last; + unsigned flags; + + /* bit number (0-31) in various status registers + ** as well as the index into the usb_info's array + ** of all endpoints + */ + unsigned char bit; + unsigned char num; + unsigned long dTD_update_fail_count; + unsigned long false_prime_fail_count; + unsigned actual_prime_fail_count; + + unsigned wedged:1; + /* pointers to DMA transfer list area */ + /* these are allocated from the usb_info dma space */ + struct ept_queue_head *head; + struct timer_list prime_timer; +}; + +/* PHY status check timer to monitor phy stuck up on reset */ +static struct timer_list phy_status_timer; + +static void ept_prime_timer_func(unsigned long data); +static void usb_do_work(struct work_struct *w); +static void usb_do_remote_wakeup(struct work_struct *w); + + +#define USB_STATE_IDLE 0 +#define USB_STATE_ONLINE 1 +#define USB_STATE_OFFLINE 2 + +#define USB_FLAG_START 0x0001 +#define USB_FLAG_VBUS_ONLINE 0x0002 +#define USB_FLAG_VBUS_OFFLINE 0x0004 +#define USB_FLAG_RESET 0x0008 +#define USB_FLAG_SUSPEND 0x0010 +#define USB_FLAG_CONFIGURED 0x0020 + +#define USB_CHG_DET_DELAY msecs_to_jiffies(1000) +#define REMOTE_WAKEUP_DELAY msecs_to_jiffies(1000) +#define PHY_STATUS_CHECK_DELAY (jiffies + msecs_to_jiffies(1000)) +#define EPT_PRIME_CHECK_DELAY (jiffies + msecs_to_jiffies(1000)) + +struct usb_info { + /* lock for register/queue/device state changes */ + spinlock_t lock; + + /* single request used for handling setup transactions */ + struct usb_request *setup_req; + + struct platform_device *pdev; + int irq; + void *addr; + + unsigned state; + unsigned flags; + + atomic_t configured; + atomic_t running; + + struct dma_pool *pool; + + /* dma 
page to back the queue heads and items */ + unsigned char *buf; + dma_addr_t dma; + + struct ept_queue_head *head; + + /* used for allocation */ + unsigned next_item; + unsigned next_ifc_num; + + /* endpoints are ordered based on their status bits, + ** so they are OUT0, OUT1, ... OUT15, IN0, IN1, ... IN15 + */ + struct msm_endpoint ept[32]; + + + /* max power requested by selected configuration */ + unsigned b_max_pow; + unsigned chg_current; + struct delayed_work chg_det; + struct delayed_work chg_stop; + struct msm_hsusb_gadget_platform_data *pdata; + struct work_struct phy_status_check; + + struct work_struct work; + unsigned phy_status; + unsigned phy_fail_count; + unsigned prime_fail_count; + unsigned long dTD_update_fail_count; + + struct usb_gadget gadget; + struct usb_gadget_driver *driver; + struct switch_dev sdev; + +#define ep0out ept[0] +#define ep0in ept[16] + + atomic_t ep0_dir; + atomic_t test_mode; + atomic_t offline_pending; + atomic_t softconnect; +#ifdef CONFIG_USB_OTG + u8 hnp_avail; +#endif + + atomic_t remote_wakeup; + atomic_t self_powered; + struct delayed_work rw_work; + + struct otg_transceiver *xceiv; + enum usb_device_state usb_state; + struct wake_lock wlock; +}; + +static const struct usb_ep_ops msm72k_ep_ops; +static struct usb_info *the_usb_info; + +static int msm72k_wakeup(struct usb_gadget *_gadget); +static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active); +static int msm72k_set_halt(struct usb_ep *_ep, int value); +static void flush_endpoint(struct msm_endpoint *ept); +static void usb_reset(struct usb_info *ui); +static int usb_ept_set_halt(struct usb_ep *_ep, int value); + +static void msm_hsusb_set_speed(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + switch (readl(USB_PORTSC) & PORTSC_PSPD_MASK) { + case PORTSC_PSPD_FS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_FULL\n"); + ui->gadget.speed = USB_SPEED_FULL; + break; + case PORTSC_PSPD_LS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_LOW\n"); + ui->gadget.speed = USB_SPEED_LOW; + break; + case PORTSC_PSPD_HS: + dev_dbg(&ui->pdev->dev, "portchange USB_SPEED_HIGH\n"); + ui->gadget.speed = USB_SPEED_HIGH; + break; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void msm_hsusb_set_state(enum usb_device_state state) +{ + unsigned long flags; + + spin_lock_irqsave(&the_usb_info->lock, flags); + the_usb_info->usb_state = state; + spin_unlock_irqrestore(&the_usb_info->lock, flags); +} + +static enum usb_device_state msm_hsusb_get_state(void) +{ + unsigned long flags; + enum usb_device_state state; + + spin_lock_irqsave(&the_usb_info->lock, flags); + state = the_usb_info->usb_state; + spin_unlock_irqrestore(&the_usb_info->lock, flags); + + return state; +} + +static ssize_t print_switch_name(struct switch_dev *sdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", DRIVER_NAME); +} + +static ssize_t print_switch_state(struct switch_dev *sdev, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", + sdev->state ? 
"online" : "offline"); +} + +static inline enum chg_type usb_get_chg_type(struct usb_info *ui) +{ + if ((readl(USB_PORTSC) & PORTSC_LS) == PORTSC_LS) + return USB_CHG_TYPE__WALLCHARGER; + else + return USB_CHG_TYPE__SDP; +} + +#define USB_WALLCHARGER_CHG_CURRENT 1800 +static int usb_get_max_power(struct usb_info *ui) +{ + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + enum chg_type temp; + int suspended; + int configured; + unsigned bmaxpow; + + if (ui->gadget.is_a_peripheral) + return -EINVAL; + + temp = atomic_read(&otg->chg_type); + spin_lock_irqsave(&ui->lock, flags); + suspended = ui->usb_state == USB_STATE_SUSPENDED ? 1 : 0; + configured = atomic_read(&ui->configured); + bmaxpow = ui->b_max_pow; + spin_unlock_irqrestore(&ui->lock, flags); + + if (temp == USB_CHG_TYPE__INVALID) + return -ENODEV; + + if (temp == USB_CHG_TYPE__WALLCHARGER) + return USB_WALLCHARGER_CHG_CURRENT; + + if (suspended || !configured) + return 0; + + return bmaxpow; +} + +static int usb_phy_stuck_check(struct usb_info *ui) +{ + /* + * write some value (0xAA) into scratch reg (0x16) and read it back, + * If the read value is same as written value, means PHY is normal + * otherwise, PHY seems to have stuck. + */ + + if (otg_io_write(ui->xceiv, 0xAA, 0x16) == -1) { + dev_dbg(&ui->pdev->dev, + "%s(): ulpi write timeout\n", __func__); + return -EIO; + } + + if (otg_io_read(ui->xceiv, 0x16) != 0xAA) { + dev_dbg(&ui->pdev->dev, + "%s(): read value is incorrect\n", __func__); + return -EIO; + } + + return 0; +} + +/* + * This function checks the phy status by reading/writing to the + * phy scratch register. If the phy is stuck resets the HW + * */ +static void usb_phy_stuck_recover(struct work_struct *w) +{ + struct usb_info *ui = the_usb_info; + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + if (ui->gadget.speed != USB_SPEED_UNKNOWN || + ui->usb_state == USB_STATE_NOTATTACHED || + ui->driver == NULL) { + spin_unlock_irqrestore(&ui->lock, flags); + return; + } + spin_unlock_irqrestore(&ui->lock, flags); + + disable_irq(otg->irq); + if (usb_phy_stuck_check(ui)) { +#ifdef CONFIG_USB_MSM_ACA + del_timer_sync(&otg->id_timer); +#endif + ui->phy_fail_count++; + dev_err(&ui->pdev->dev, + "%s():PHY stuck, resetting HW\n", __func__); + /* + * PHY seems to have stuck, + * reset the PHY and HW link to recover the PHY + */ + usb_reset(ui); +#ifdef CONFIG_USB_MSM_ACA + mod_timer(&otg->id_timer, jiffies + + msecs_to_jiffies(OTG_ID_POLL_MS)); +#endif + msm72k_pullup_internal(&ui->gadget, 1); + } + enable_irq(otg->irq); +} + +static void usb_phy_status_check_timer(unsigned long data) +{ + struct usb_info *ui = the_usb_info; + + schedule_work(&ui->phy_status_check); +} + +static void usb_chg_stop(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, chg_stop.work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + enum chg_type temp; + + temp = atomic_read(&otg->chg_type); + + if (temp == USB_CHG_TYPE__SDP) + otg_set_power(ui->xceiv, 0); +} + +static void usb_chg_detect(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, chg_det.work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + enum chg_type temp = USB_CHG_TYPE__INVALID; + unsigned long flags; + int maxpower; + + spin_lock_irqsave(&ui->lock, flags); + if (ui->usb_state == USB_STATE_NOTATTACHED) { + spin_unlock_irqrestore(&ui->lock, flags); + return; + } + + temp = usb_get_chg_type(ui); + spin_unlock_irqrestore(&ui->lock, flags); + + 
atomic_set(&otg->chg_type, temp); + maxpower = usb_get_max_power(ui); + if (maxpower > 0) + otg_set_power(ui->xceiv, maxpower); + + /* USB driver prevents idle and suspend power collapse(pc) + * while USB cable is connected. But when dedicated charger is + * connected, driver can vote for idle and suspend pc. + * OTG driver handles idle pc as part of above otg_set_power call + * when wallcharger is attached. To allow suspend pc, release the + * wakelock which will be re-acquired for any sub-sequent usb interrupts + * */ + if (temp == USB_CHG_TYPE__WALLCHARGER) { + pm_runtime_put_sync(&ui->pdev->dev); + wake_unlock(&ui->wlock); + } +} + +static int usb_ep_get_stall(struct msm_endpoint *ept) +{ + unsigned int n; + struct usb_info *ui = ept->ui; + + n = readl(USB_ENDPTCTRL(ept->num)); + if (ept->flags & EPT_FLAG_IN) + return (CTRL_TXS & n) ? 1 : 0; + else + return (CTRL_RXS & n) ? 1 : 0; +} + +static void init_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + + ept->ui = ui; + ept->bit = n; + ept->num = n & 15; + ept->ep.name = ep_name[n]; + ept->ep.ops = &msm72k_ep_ops; + + if (ept->bit > 15) { + /* IN endpoint */ + ept->head = ui->head + (ept->num << 1) + 1; + ept->flags = EPT_FLAG_IN; + } else { + /* OUT endpoint */ + ept->head = ui->head + (ept->num << 1); + ept->flags = 0; + } + setup_timer(&ept->prime_timer, ept_prime_timer_func, + (unsigned long) ept); + + } +} + +static void config_ept(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + unsigned cfg = CONFIG_MAX_PKT(ept->ep.maxpacket) | CONFIG_ZLT; + + /* ep0 out needs interrupt-on-setup */ + if (ept->bit == 0) + cfg |= CONFIG_IOS; + + ept->head->config = cfg; + ept->head->next = TERMINATE; + + if (ept->ep.maxpacket) + dev_dbg(&ui->pdev->dev, + "ept #%d %s max:%d head:%p bit:%d\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); +} + +static void configure_endpoints(struct usb_info *ui) +{ + unsigned n; + + for (n = 0; n < 32; n++) + config_ept(ui->ept + n); +} + +struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept, + unsigned bufsize, gfp_t gfp_flags) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req; + + req = kzalloc(sizeof(*req), gfp_flags); + if (!req) + goto fail1; + + req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma); + if (!req->item) + goto fail2; + + if (bufsize) { + req->req.buf = kmalloc(bufsize, gfp_flags); + if (!req->req.buf) + goto fail3; + req->alloced = 1; + } + + return &req->req; + +fail3: + dma_pool_free(ui->pool, req->item, req->item_dma); +fail2: + kfree(req); +fail1: + return 0; +} + +static void usb_ept_enable(struct msm_endpoint *ept, int yes, + unsigned char ep_type) +{ + struct usb_info *ui = ept->ui; + int in = ept->flags & EPT_FLAG_IN; + unsigned n; + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (yes) { + n = (n & (~CTRL_TXT_MASK)) | + (ep_type << CTRL_TXT_EP_TYPE_SHIFT); + n |= CTRL_TXE | CTRL_TXR; + } else + n &= (~CTRL_TXE); + } else { + if (yes) { + n = (n & (~CTRL_RXT_MASK)) | + (ep_type << CTRL_RXT_EP_TYPE_SHIFT); + n |= CTRL_RXE | CTRL_RXR; + } else + n &= ~(CTRL_RXE); + } + /* complete all the updates to ept->head before enabling endpoint*/ + mb(); + writel(n, USB_ENDPTCTRL(ept->num)); + + /* Ensure endpoint is enabled before returning */ + mb(); + + dev_dbg(&ui->pdev->dev, "ept %d %s %s\n", + ept->num, in ? "in" : "out", yes ? 
"enabled" : "disabled"); +} + +static void ept_prime_timer_func(unsigned long data) +{ + struct msm_endpoint *ept = (struct msm_endpoint *)data; + struct usb_info *ui = ept->ui; + unsigned n = 1 << ept->bit; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + + ept->false_prime_fail_count++; + if ((readl_relaxed(USB_ENDPTPRIME) & n)) { + /* + * ---- UNLIKELY --- + * May be hardware is taking long time to process the + * prime request. Or could be intermittent priming and + * previous dTD is not fired yet. + */ + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); + goto out; + } + if (readl_relaxed(USB_ENDPTSTAT) & n) + goto out; + + /* clear speculative loads on item->info */ + rmb(); + if (ept->req && (ept->req->item->info & INFO_ACTIVE)) { + ui->prime_fail_count++; + ept->actual_prime_fail_count++; + pr_err("%s(): ept%d%s prime failed. ept: config: %x" + "active: %x next: %x info: %x\n", + __func__, ept->num, + ept->flags & EPT_FLAG_IN ? "in" : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + writel_relaxed(n, USB_ENDPTPRIME); + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); + } +out: + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void usb_ept_start(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req = ept->req; + unsigned n = 1 << ept->bit; + + BUG_ON(req->live); + + while (req) { + req->live = 1; + /* prepare the transaction descriptor item for the hardware */ + req->item->info = + INFO_BYTES(req->req.length) | INFO_IOC | INFO_ACTIVE; + req->item->page0 = req->dma; + req->item->page1 = (req->dma + 0x1000) & 0xfffff000; + req->item->page2 = (req->dma + 0x2000) & 0xfffff000; + req->item->page3 = (req->dma + 0x3000) & 0xfffff000; + + if (req->next == NULL) { + req->item->next = TERMINATE; + break; + } + req->item->next = req->next->item_dma; + req = req->next; + } + + rmb(); + /* link the hw queue head to the request's transaction item */ + ept->head->next = ept->req->item_dma; + ept->head->info = 0; + + /* flush buffers before priming ept */ + mb(); + /* during high throughput testing it is observed that + * ept stat bit is not set even though all the data + * structures are updated properly and ept prime bit + * is set. To workaround the issue, kick a timer and + * make decision on re-prime. We can do a busy loop here + * but it leads to high cpu usage. 
+ */ + writel_relaxed(n, USB_ENDPTPRIME); + mod_timer(&ept->prime_timer, EPT_PRIME_CHECK_DELAY); +} + +int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req) +{ + unsigned long flags; + struct msm_request *req = to_msm_request(_req); + struct msm_request *last; + struct usb_info *ui = ept->ui; + unsigned length = req->req.length; + + if (length > 0x4000) + return -EMSGSIZE; + + spin_lock_irqsave(&ui->lock, flags); + + if (req->busy) { + req->req.status = -EBUSY; + spin_unlock_irqrestore(&ui->lock, flags); + dev_err(&ui->pdev->dev, + "usb_ept_queue_xfer() tried to queue busy request\n"); + return -EBUSY; + } + + if (!atomic_read(&ui->configured) && (ept->num != 0)) { + req->req.status = -ESHUTDOWN; + spin_unlock_irqrestore(&ui->lock, flags); + if (printk_ratelimit()) + dev_err(&ui->pdev->dev, + "%s: called while offline\n", __func__); + return -ESHUTDOWN; + } + + if (ui->usb_state == USB_STATE_SUSPENDED) { + if (!atomic_read(&ui->remote_wakeup)) { + req->req.status = -EAGAIN; + spin_unlock_irqrestore(&ui->lock, flags); + if (printk_ratelimit()) + dev_err(&ui->pdev->dev, + "%s: cannot queue as bus is suspended " + "ept #%d %s max:%d head:%p bit:%d\n", + __func__, ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->ep.maxpacket, ept->head, ept->bit); + + return -EAGAIN; + } + + wake_lock(&ui->wlock); + otg_set_suspend(ui->xceiv, 0); + schedule_delayed_work(&ui->rw_work, REMOTE_WAKEUP_DELAY); + } + + req->busy = 1; + req->live = 0; + req->next = 0; + req->req.status = -EBUSY; + + req->dma = dma_map_single(NULL, req->req.buf, length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + + /* Add the new request to the end of the queue */ + last = ept->last; + if (last) { + /* Already requests in the queue. add us to the + * end, but let the completion interrupt actually + * start things going, to avoid hw issues + */ + last->next = req; + req->prev = last; + + } else { + /* queue was empty -- kick the hardware */ + ept->req = req; + req->prev = NULL; + usb_ept_start(ept); + } + ept->last = req; + + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +/* --- endpoint 0 handling --- */ + +static void ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + req->complete = r->gadget_complete; + r->gadget_complete = 0; + if (req->complete) + req->complete(&ui->ep0in.ep, req); +} + +static void ep0_status_complete(struct usb_ep *ep, struct usb_request *_req) +{ + struct usb_request *req = _req->context; + struct msm_request *r; + struct msm_endpoint *ept; + struct usb_info *ui; + + pr_debug("%s:\n", __func__); + if (!req) + return; + + r = to_msm_request(req); + ept = to_msm_endpoint(ep); + ui = ept->ui; + _req->context = 0; + + req->complete = r->gadget_complete; + req->zero = 0; + r->gadget_complete = 0; + if (req->complete) + req->complete(&ui->ep0in.ep, req); + +} + +static void ep0_status_phase(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + + pr_debug("%s:\n", __func__); + + req->length = 0; + req->complete = ep0_status_complete; + + /* status phase */ + if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) + usb_ept_queue_xfer(&ui->ep0out, req); + else + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0in_send_zero_leng_pkt(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct usb_request *req = 
ui->setup_req; + + pr_debug("%s:\n", __func__); + + req->length = 0; + req->complete = ep0_status_phase; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_queue_ack_complete(struct usb_ep *ep, + struct usb_request *_req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + struct usb_request *req = ui->setup_req; + + pr_debug("%s: _req:%p actual:%d length:%d zero:%d\n", + __func__, _req, _req->actual, + _req->length, _req->zero); + + /* queue up the receive of the ACK response from the host */ + if (_req->status == 0 && _req->actual == _req->length) { + req->context = _req; + if (atomic_read(&ui->ep0_dir) == USB_DIR_IN) { + if (_req->zero && _req->length && + !(_req->length % ep->maxpacket)) { + ep0in_send_zero_leng_pkt(&ui->ep0in); + return; + } + } + ep0_status_phase(ep, req); + } else + ep0_complete(ep, _req); +} + +static void ep0_setup_ack_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msm_endpoint *ept = to_msm_endpoint(ep); + struct usb_info *ui = ept->ui; + unsigned int temp; + int test_mode = atomic_read(&ui->test_mode); + + if (!test_mode) + return; + + switch (test_mode) { + case J_TEST: + dev_info(&ui->pdev->dev, "usb electrical test mode: (J)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_J_STATE, USB_PORTSC); + break; + + case K_TEST: + dev_info(&ui->pdev->dev, "usb electrical test mode: (K)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_K_STATE, USB_PORTSC); + break; + + case SE0_NAK_TEST: + dev_info(&ui->pdev->dev, + "usb electrical test mode: (SE0-NAK)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_SE0_NAK, USB_PORTSC); + break; + + case TST_PKT_TEST: + dev_info(&ui->pdev->dev, + "usb electrical test mode: (TEST_PKT)\n"); + temp = readl(USB_PORTSC) & (~PORTSC_PTC); + writel(temp | PORTSC_PTC_TST_PKT, USB_PORTSC); + break; + } +} + +static void ep0_setup_ack(struct usb_info *ui) +{ + struct usb_request *req = ui->setup_req; + req->length = 0; + req->complete = ep0_setup_ack_complete; + usb_ept_queue_xfer(&ui->ep0in, req); +} + +static void ep0_setup_stall(struct usb_info *ui) +{ + writel((1<<16) | (1<<0), USB_ENDPTCTRL(0)); +} + +static void ep0_setup_send(struct usb_info *ui, unsigned length) +{ + struct usb_request *req = ui->setup_req; + struct msm_request *r = to_msm_request(req); + struct msm_endpoint *ept = &ui->ep0in; + + req->length = length; + req->complete = ep0_queue_ack_complete; + r->gadget_complete = 0; + usb_ept_queue_xfer(ept, req); +} + +static void handle_setup(struct usb_info *ui) +{ + struct usb_ctrlrequest ctl; + struct usb_request *req = ui->setup_req; + int ret; +#ifdef CONFIG_USB_OTG + u8 hnp; + unsigned long flags; +#endif + /* USB hardware sometimes generate interrupt before + * 8 bytes of SETUP packet are written to system memory. + * This results in fetching wrong setup_data sometimes. + * TODO: Remove below workaround of adding 1us delay once + * it gets fixed in hardware. 
+ */ + udelay(10); + + memcpy(&ctl, ui->ep0out.head->setup_data, sizeof(ctl)); + /* Ensure buffer is read before acknowledging to h/w */ + mb(); + + writel(EPT_RX(0), USB_ENDPTSETUPSTAT); + + if (ctl.bRequestType & USB_DIR_IN) + atomic_set(&ui->ep0_dir, USB_DIR_IN); + else + atomic_set(&ui->ep0_dir, USB_DIR_OUT); + + /* any pending ep0 transactions must be canceled */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + dev_dbg(&ui->pdev->dev, + "setup: type=%02x req=%02x val=%04x idx=%04x len=%04x\n", + ctl.bRequestType, ctl.bRequest, ctl.wValue, + ctl.wIndex, ctl.wLength); + + if ((ctl.bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) == + (USB_DIR_IN | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_GET_STATUS) { + /* OTG supplement Rev 2.0 introduces another device + * GET_STATUS request for HNP polling with length = 1. + */ + u8 len = 2; + switch (ctl.bRequestType & USB_RECIP_MASK) { + case USB_RECIP_ENDPOINT: + { + struct msm_endpoint *ept; + unsigned num = + ctl.wIndex & USB_ENDPOINT_NUMBER_MASK; + u16 temp = 0; + + if (num == 0) { + memset(req->buf, 0, 2); + break; + } + if (ctl.wIndex & USB_ENDPOINT_DIR_MASK) + num += 16; + ept = &ui->ep0out + num; + temp = usb_ep_get_stall(ept); + temp = temp << USB_ENDPOINT_HALT; + memcpy(req->buf, &temp, 2); + break; + } + case USB_RECIP_DEVICE: + { + u16 temp = 0; + + if (ctl.wIndex == OTG_STATUS_SELECTOR) { +#ifdef CONFIG_USB_OTG + spin_lock_irqsave(&ui->lock, flags); + hnp = (ui->gadget.host_request << + HOST_REQUEST_FLAG); + ui->hnp_avail = 1; + spin_unlock_irqrestore(&ui->lock, + flags); + memcpy(req->buf, &hnp, 1); + len = 1; +#else + goto stall; +#endif + } else { + temp = (atomic_read(&ui->self_powered) + << USB_DEVICE_SELF_POWERED); + temp |= (atomic_read(&ui->remote_wakeup) + << USB_DEVICE_REMOTE_WAKEUP); + memcpy(req->buf, &temp, 2); + } + break; + } + case USB_RECIP_INTERFACE: + memset(req->buf, 0, 2); + break; + default: + goto stall; + } + ep0_setup_send(ui, len); + return; + } + } + if (ctl.bRequestType == + (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) { + if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) || + (ctl.bRequest == USB_REQ_SET_FEATURE)) { + if ((ctl.wValue == 0) && (ctl.wLength == 0)) { + unsigned num = ctl.wIndex & 0x0f; + + if (num != 0) { + struct msm_endpoint *ept; + + if (ctl.wIndex & 0x80) + num += 16; + ept = &ui->ep0out + num; + + if (ept->wedged) + goto ack; + if (ctl.bRequest == USB_REQ_SET_FEATURE) + usb_ept_set_halt(&ept->ep, 1); + else + usb_ept_set_halt(&ept->ep, 0); + } + goto ack; + } + } + } + if (ctl.bRequestType == (USB_DIR_OUT | USB_TYPE_STANDARD)) { + if (ctl.bRequest == USB_REQ_SET_CONFIGURATION) { + atomic_set(&ui->configured, !!ctl.wValue); + msm_hsusb_set_state(USB_STATE_CONFIGURED); + } else if (ctl.bRequest == USB_REQ_SET_ADDRESS) { + /* + * Gadget speed should be set when PCI interrupt + * occurs. But sometimes, PCI interrupt is not + * occuring after reset. Hence update the gadget + * speed here. 
+ */ + if (ui->gadget.speed == USB_SPEED_UNKNOWN) { + dev_info(&ui->pdev->dev, + "PCI intr missed" + "set speed explictly\n"); + msm_hsusb_set_speed(ui); + } + msm_hsusb_set_state(USB_STATE_ADDRESS); + + /* write address delayed (will take effect + ** after the next IN txn) + */ + writel((ctl.wValue << 25) | (1 << 24), USB_DEVICEADDR); + goto ack; + } else if (ctl.bRequest == USB_REQ_SET_FEATURE) { + switch (ctl.wValue) { + case USB_DEVICE_TEST_MODE: + switch (ctl.wIndex) { + case J_TEST: + case K_TEST: + case SE0_NAK_TEST: + case TST_PKT_TEST: + atomic_set(&ui->test_mode, ctl.wIndex); + goto ack; + } + goto stall; + case USB_DEVICE_REMOTE_WAKEUP: + atomic_set(&ui->remote_wakeup, 1); + goto ack; +#ifdef CONFIG_USB_OTG + case USB_DEVICE_B_HNP_ENABLE: + ui->gadget.b_hnp_enable = 1; + goto ack; + case USB_DEVICE_A_HNP_SUPPORT: + case USB_DEVICE_A_ALT_HNP_SUPPORT: + /* B-devices compliant to OTG spec + * Rev 2.0 are not required to + * suppport these features. + */ + goto stall; +#endif + } + } else if ((ctl.bRequest == USB_REQ_CLEAR_FEATURE) && + (ctl.wValue == USB_DEVICE_REMOTE_WAKEUP)) { + atomic_set(&ui->remote_wakeup, 0); + goto ack; + } + } + + /* delegate if we get here */ + if (ui->driver) { + ret = ui->driver->setup(&ui->gadget, &ctl); + if (ret >= 0) + return; + } + +stall: + /* stall ep0 on error */ + ep0_setup_stall(ui); + return; + +ack: + ep0_setup_ack(ui); +} + +static void handle_endpoint(struct usb_info *ui, unsigned bit) +{ + struct msm_endpoint *ept = ui->ept + bit; + struct msm_request *req; + unsigned long flags; + unsigned info; + + /* + INFO("handle_endpoint() %d %s req=%p(%08x)\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out", + ept->req, ept->req ? ept->req->item_dma : 0); + */ + + /* expire all requests that are no longer active */ + spin_lock_irqsave(&ui->lock, flags); + while ((req = ept->req)) { + /* if we've processed all live requests, time to + * restart the hardware on the next non-live request + */ + if (!req->live) { + usb_ept_start(ept); + break; + } + + /* clean speculative fetches on req->item->info */ + dma_coherent_post_ops(); + info = req->item->info; + /* if the transaction is still in-flight, stop here */ + if (info & INFO_ACTIVE) + break; + + del_timer(&ept->prime_timer); + /* advance ept queue to the next request */ + ept->req = req->next; + if (ept->req == 0) + ept->last = 0; + + dma_unmap_single(NULL, req->dma, req->req.length, + (ept->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) { + /* XXX pass on more specific error code */ + req->req.status = -EIO; + req->req.actual = 0; + dev_err(&ui->pdev->dev, + "ept %d %s error. info=%08x\n", + ept->num, + (ept->flags & EPT_FLAG_IN) ? "in" : "out", + info); + } else { + req->req.status = 0; + req->req.actual = + req->req.length - ((info >> 16) & 0x7FFF); + } + req->busy = 0; + req->live = 0; + + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint_hw(struct usb_info *ui, unsigned bits) +{ + /* flush endpoint, canceling transactions + ** - this can take a "large amount of time" (per databook) + ** - the flush can fail in some cases, thus we check STAT + ** and repeat if we're still operating + ** (does the fact that this doesn't use the tripwire matter?!) 
+ */ + do { + writel(bits, USB_ENDPTFLUSH); + while (readl(USB_ENDPTFLUSH) & bits) + udelay(100); + } while (readl(USB_ENDPTSTAT) & bits); +} + +static void flush_endpoint_sw(struct msm_endpoint *ept) +{ + struct usb_info *ui = ept->ui; + struct msm_request *req, *next_req = NULL; + unsigned long flags; + + /* inactive endpoints have nothing to do here */ + if (ept->ep.maxpacket == 0) + return; + + /* put the queue head in a sane state */ + ept->head->info = 0; + ept->head->next = TERMINATE; + + /* cancel any pending requests */ + spin_lock_irqsave(&ui->lock, flags); + req = ept->req; + ept->req = 0; + ept->last = 0; + while (req != 0) { + req->busy = 0; + req->live = 0; + req->req.status = -ESHUTDOWN; + req->req.actual = 0; + + /* Gadget driver may free the request in completion + * handler. So keep a copy of next req pointer + * before calling completion handler. + */ + next_req = req->next; + if (req->req.complete) { + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ept->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + req = next_req; + } + spin_unlock_irqrestore(&ui->lock, flags); +} + +static void flush_endpoint(struct msm_endpoint *ept) +{ + del_timer(&ept->prime_timer); + flush_endpoint_hw(ept->ui, (1 << ept->bit)); + flush_endpoint_sw(ept); +} + +static irqreturn_t usb_interrupt(int irq, void *data) +{ + struct usb_info *ui = data; + unsigned n; + unsigned long flags; + + n = readl(USB_USBSTS); + writel(n, USB_USBSTS); + + /* somehow we got an IRQ while in the reset sequence: ignore it */ + if (!atomic_read(&ui->running)) + return IRQ_HANDLED; + + if (n & STS_PCI) { + msm_hsusb_set_speed(ui); + if (atomic_read(&ui->configured)) { + wake_lock(&ui->wlock); + + spin_lock_irqsave(&ui->lock, flags); + ui->usb_state = USB_STATE_CONFIGURED; + ui->flags = USB_FLAG_CONFIGURED; + spin_unlock_irqrestore(&ui->lock, flags); + + ui->driver->resume(&ui->gadget); + schedule_work(&ui->work); + } else { + msm_hsusb_set_state(USB_STATE_DEFAULT); + } + +#ifdef CONFIG_USB_OTG + /* notify otg to clear A_BIDL_ADIS timer */ + if (ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 0); +#endif + } + + if (n & STS_URI) { + dev_dbg(&ui->pdev->dev, "reset\n"); + spin_lock_irqsave(&ui->lock, flags); + ui->gadget.speed = USB_SPEED_UNKNOWN; + spin_unlock_irqrestore(&ui->lock, flags); +#ifdef CONFIG_USB_OTG + /* notify otg to clear A_BIDL_ADIS timer */ + if (ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 0); + spin_lock_irqsave(&ui->lock, flags); + /* Host request is persistent across reset */ + ui->gadget.b_hnp_enable = 0; + ui->hnp_avail = 0; + spin_unlock_irqrestore(&ui->lock, flags); +#endif + msm_hsusb_set_state(USB_STATE_DEFAULT); + atomic_set(&ui->remote_wakeup, 0); + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work(&ui->chg_stop, 0); + + writel(readl(USB_ENDPTSETUPSTAT), USB_ENDPTSETUPSTAT); + writel(readl(USB_ENDPTCOMPLETE), USB_ENDPTCOMPLETE); + writel(0xffffffff, USB_ENDPTFLUSH); + writel(0, USB_ENDPTCTRL(1)); + + wake_lock(&ui->wlock); + if (atomic_read(&ui->configured)) { + /* marking us offline will cause ept queue attempts + ** to fail + */ + atomic_set(&ui->configured, 0); + /* Defer sending offline uevent to userspace */ + atomic_set(&ui->offline_pending, 1); + + /* XXX: we can't seem to detect going offline, + * XXX: so deconfigure on reset for the time being + */ + dev_dbg(&ui->pdev->dev, + "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + 
flush_endpoint(&ui->ep0in); + + } + /* Start phy stuck timer */ + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + mod_timer(&phy_status_timer, PHY_STATUS_CHECK_DELAY); + } + + if (n & STS_SLI) { + dev_dbg(&ui->pdev->dev, "suspend\n"); + + spin_lock_irqsave(&ui->lock, flags); + ui->usb_state = USB_STATE_SUSPENDED; + ui->flags = USB_FLAG_SUSPEND; + spin_unlock_irqrestore(&ui->lock, flags); + + ui->driver->suspend(&ui->gadget); + schedule_work(&ui->work); +#ifdef CONFIG_USB_OTG + /* notify otg for + * 1. kicking A_BIDL_ADIS timer in case of A-peripheral + * 2. disabling pull-up and kicking B_ASE0_RST timer + */ + if (ui->gadget.b_hnp_enable || ui->gadget.is_a_peripheral) + otg_set_suspend(ui->xceiv, 1); +#endif + } + + if (n & STS_UI) { + n = readl(USB_ENDPTSETUPSTAT); + if (n & EPT_RX(0)) + handle_setup(ui); + + n = readl(USB_ENDPTCOMPLETE); + writel(n, USB_ENDPTCOMPLETE); + while (n) { + unsigned bit = __ffs(n); + handle_endpoint(ui, bit); + n = n & (~(1 << bit)); + } + } + return IRQ_HANDLED; +} + +static void usb_prepare(struct usb_info *ui) +{ + spin_lock_init(&ui->lock); + + memset(ui->buf, 0, 4096); + ui->head = (void *) (ui->buf + 0); + + /* only important for reset/reinit */ + memset(ui->ept, 0, sizeof(ui->ept)); + ui->next_item = 0; + ui->next_ifc_num = 0; + + init_endpoints(ui); + + ui->ep0in.ep.maxpacket = 64; + ui->ep0out.ep.maxpacket = 64; + + ui->setup_req = + usb_ept_alloc_req(&ui->ep0in, SETUP_BUF_SIZE, GFP_KERNEL); + + INIT_WORK(&ui->work, usb_do_work); + INIT_DELAYED_WORK(&ui->chg_det, usb_chg_detect); + INIT_DELAYED_WORK(&ui->chg_stop, usb_chg_stop); + INIT_DELAYED_WORK(&ui->rw_work, usb_do_remote_wakeup); + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + INIT_WORK(&ui->phy_status_check, usb_phy_stuck_recover); +} + +static void usb_reset(struct usb_info *ui) +{ + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + dev_dbg(&ui->pdev->dev, "reset controller\n"); + + atomic_set(&ui->running, 0); + + /* + * PHY reset takes minimum 100 msec. Hence reset only link + * during HNP. Reset PHY and link in B-peripheral mode. 
+ */ + if (ui->gadget.is_a_peripheral) + otg->reset(ui->xceiv, 0); + else + otg->reset(ui->xceiv, 1); + + /* set usb controller interrupt threshold to zero*/ + writel((readl(USB_USBCMD) & ~USBCMD_ITC_MASK) | USBCMD_ITC(0), + USB_USBCMD); + + writel(ui->dma, USB_ENDPOINTLISTADDR); + + configure_endpoints(ui); + + /* marking us offline will cause ept queue attempts to fail */ + atomic_set(&ui->configured, 0); + + if (ui->driver) { + dev_dbg(&ui->pdev->dev, "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + /* enable interrupts */ + writel(STS_URI | STS_SLI | STS_UI | STS_PCI, USB_USBINTR); + + /* Ensure that h/w RESET is completed before returning */ + mb(); + + atomic_set(&ui->running, 1); +} + +static void usb_start(struct usb_info *ui) +{ + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_START; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); +} + +static int usb_free(struct usb_info *ui, int ret) +{ + dev_dbg(&ui->pdev->dev, "usb_free(%d)\n", ret); + + if (ui->xceiv) + otg_put_transceiver(ui->xceiv); + + if (ui->irq) + free_irq(ui->irq, 0); + if (ui->pool) + dma_pool_destroy(ui->pool); + if (ui->dma) + dma_free_coherent(&ui->pdev->dev, 4096, ui->buf, ui->dma); + kfree(ui); + return ret; +} + +static void usb_do_work_check_vbus(struct usb_info *ui) +{ + unsigned long iflags; + + spin_lock_irqsave(&ui->lock, iflags); + if (is_usb_online(ui)) + ui->flags |= USB_FLAG_VBUS_ONLINE; + else + ui->flags |= USB_FLAG_VBUS_OFFLINE; + spin_unlock_irqrestore(&ui->lock, iflags); +} + +static void usb_do_work(struct work_struct *w) +{ + struct usb_info *ui = container_of(w, struct usb_info, work); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long iflags; + unsigned flags, _vbus; + + for (;;) { + spin_lock_irqsave(&ui->lock, iflags); + flags = ui->flags; + ui->flags = 0; + _vbus = is_usb_online(ui); + spin_unlock_irqrestore(&ui->lock, iflags); + + /* give up if we have nothing to do */ + if (flags == 0) + break; + + switch (ui->state) { + case USB_STATE_IDLE: + if (flags & USB_FLAG_START) { + int ret; + + if (!_vbus) { + ui->state = USB_STATE_OFFLINE; + break; + } + + pm_runtime_get_noresume(&ui->pdev->dev); + pm_runtime_resume(&ui->pdev->dev); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: IDLE -> ONLINE\n"); + usb_reset(ui); + ret = request_irq(otg->irq, usb_interrupt, + IRQF_SHARED, + ui->pdev->name, ui); + /* FIXME: should we call BUG_ON when + * requst irq fails + */ + if (ret) { + dev_err(&ui->pdev->dev, + "hsusb: peripheral: request irq" + " failed:(%d)", ret); + break; + } + ui->irq = otg->irq; + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + + if (!atomic_read(&ui->softconnect)) + break; + + msm72k_pullup_internal(&ui->gadget, 1); + + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work( + &ui->chg_det, + USB_CHG_DET_DELAY); + + } + break; + case USB_STATE_ONLINE: + if (atomic_read(&ui->offline_pending)) { + switch_set_state(&ui->sdev, 0); + atomic_set(&ui->offline_pending, 0); + } + + /* If at any point when we were online, we received + * the signal to go offline, we must honor it + */ + if (flags & USB_FLAG_VBUS_OFFLINE) { + + ui->chg_current = 0; + /* wait incase chg_detect is running */ + if (!ui->gadget.is_a_peripheral) + cancel_delayed_work_sync(&ui->chg_det); + + dev_dbg(&ui->pdev->dev, + "msm72k_udc: ONLINE -> OFFLINE\n"); + + atomic_set(&ui->running, 0); + 
atomic_set(&ui->remote_wakeup, 0); + atomic_set(&ui->configured, 0); + + if (ui->driver) { + dev_dbg(&ui->pdev->dev, + "usb: notify offline\n"); + ui->driver->disconnect(&ui->gadget); + } + /* cancel pending ep0 transactions */ + flush_endpoint(&ui->ep0out); + flush_endpoint(&ui->ep0in); + + /* synchronize with irq context */ + spin_lock_irqsave(&ui->lock, iflags); +#ifdef CONFIG_USB_OTG + ui->gadget.host_request = 0; + ui->gadget.b_hnp_enable = 0; + ui->hnp_avail = 0; +#endif + msm72k_pullup_internal(&ui->gadget, 0); + spin_unlock_irqrestore(&ui->lock, iflags); + + + /* if charger is initialized to known type + * we must let modem know about charger + * disconnection + */ + otg_set_power(ui->xceiv, 0); + + if (ui->irq) { + free_irq(ui->irq, ui); + ui->irq = 0; + } + + + switch_set_state(&ui->sdev, 0); + + ui->state = USB_STATE_OFFLINE; + usb_do_work_check_vbus(ui); + pm_runtime_put_noidle(&ui->pdev->dev); + pm_runtime_suspend(&ui->pdev->dev); + wake_unlock(&ui->wlock); + break; + } + if (flags & USB_FLAG_SUSPEND) { + int maxpower = usb_get_max_power(ui); + + if (maxpower < 0) + break; + + otg_set_power(ui->xceiv, 0); + /* To support TCXO during bus suspend + * This might be dummy check since bus suspend + * is not implemented as of now + * */ + if (release_wlocks) + wake_unlock(&ui->wlock); + + /* TBD: Initiate LPM at usb bus suspend */ + break; + } + if (flags & USB_FLAG_CONFIGURED) { + int maxpower = usb_get_max_power(ui); + + /* We may come here even when no configuration + * is selected. Send online/offline event + * accordingly. + */ + switch_set_state(&ui->sdev, + atomic_read(&ui->configured)); + + if (maxpower < 0) + break; + + ui->chg_current = maxpower; + otg_set_power(ui->xceiv, maxpower); + break; + } + if (flags & USB_FLAG_RESET) { + dev_dbg(&ui->pdev->dev, + "msm72k_udc: ONLINE -> RESET\n"); + msm72k_pullup_internal(&ui->gadget, 0); + usb_reset(ui); + msm72k_pullup_internal(&ui->gadget, 1); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: RESET -> ONLINE\n"); + break; + } + break; + case USB_STATE_OFFLINE: + /* If we were signaled to go online and vbus is still + * present when we received the signal, go online. + */ + if ((flags & USB_FLAG_VBUS_ONLINE) && _vbus) { + int ret; + + pm_runtime_get_noresume(&ui->pdev->dev); + pm_runtime_resume(&ui->pdev->dev); + dev_dbg(&ui->pdev->dev, + "msm72k_udc: OFFLINE -> ONLINE\n"); + + usb_reset(ui); + ui->state = USB_STATE_ONLINE; + usb_do_work_check_vbus(ui); + ret = request_irq(otg->irq, usb_interrupt, + IRQF_SHARED, + ui->pdev->name, ui); + /* FIXME: should we call BUG_ON when + * requst irq fails + */ + if (ret) { + dev_err(&ui->pdev->dev, + "hsusb: peripheral: request irq" + " failed:(%d)", ret); + break; + } + ui->irq = otg->irq; + enable_irq_wake(otg->irq); + + if (!atomic_read(&ui->softconnect)) + break; + msm72k_pullup_internal(&ui->gadget, 1); + + if (!ui->gadget.is_a_peripheral) + schedule_delayed_work( + &ui->chg_det, + USB_CHG_DET_DELAY); + } + break; + } + } +} + +/* FIXME - the callers of this function should use a gadget API instead. + * This is called from htc_battery.c and board-halibut.c + * WARNING - this can get called before this driver is initialized. 
+ */ +void msm_hsusb_set_vbus_state(int online) +{ + unsigned long flags; + struct usb_info *ui = the_usb_info; + + if (!ui) { + pr_err("%s called before driver initialized\n", __func__); + return; + } + + spin_lock_irqsave(&ui->lock, flags); + + if (is_usb_online(ui) == online) + goto out; + + if (online) { + ui->usb_state = USB_STATE_POWERED; + ui->flags |= USB_FLAG_VBUS_ONLINE; + } else { + ui->gadget.speed = USB_SPEED_UNKNOWN; + ui->usb_state = USB_STATE_NOTATTACHED; + ui->flags |= USB_FLAG_VBUS_OFFLINE; + } + if (in_interrupt()) { + schedule_work(&ui->work); + } else { + spin_unlock_irqrestore(&ui->lock, flags); + usb_do_work(&ui->work); + return; + } +out: + spin_unlock_irqrestore(&ui->lock, flags); +} + +#if defined(CONFIG_DEBUG_FS) + +void usb_function_reenumerate(void) +{ + struct usb_info *ui = the_usb_info; + + /* disable and re-enable the D+ pullup */ + dev_dbg(&ui->pdev->dev, "disable pullup\n"); + writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD); + + msleep(10); + + dev_dbg(&ui->pdev->dev, "enable pullup\n"); + writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD); +} + +static char debug_buffer[PAGE_SIZE]; + +static ssize_t debug_read_status(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = debug_buffer; + unsigned long flags; + struct msm_endpoint *ept; + struct msm_request *req; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: setup=%08x prime=%08x stat=%08x done=%08x\n", + readl(USB_ENDPTSETUPSTAT), + readl(USB_ENDPTPRIME), + readl(USB_ENDPTSTAT), + readl(USB_ENDPTCOMPLETE)); + i += scnprintf(buf + i, PAGE_SIZE - i, + "regs: cmd=%08x sts=%08x intr=%08x port=%08x\n\n", + readl(USB_USBCMD), + readl(USB_USBSTS), + readl(USB_USBINTR), + readl(USB_PORTSC)); + + + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + + i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s cfg=%08x active=%08x next=%08x info=%08x\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + + for (req = ept->req; req; req = req->next) + i += scnprintf(buf + i, PAGE_SIZE - i, + " req @%08x next=%08x info=%08x page0=%08x %c %c\n", + req->item_dma, req->item->next, + req->item->info, req->item->page0, + req->busy ? 'B' : ' ', + req->live ? 
'L' : ' '); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "phy failure count: %d\n", ui->phy_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static ssize_t debug_write_reset(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + ui->flags |= USB_FLAG_RESET; + schedule_work(&ui->work); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static ssize_t debug_write_cycle(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + usb_function_reenumerate(); + return count; +} + +static int debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations debug_stat_ops = { + .open = debug_open, + .read = debug_read_status, +}; + +const struct file_operations debug_reset_ops = { + .open = debug_open, + .write = debug_write_reset, +}; + +const struct file_operations debug_cycle_ops = { + .open = debug_open, + .write = debug_write_cycle, +}; + +static ssize_t debug_read_release_wlocks(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + char kbuf[10]; + size_t c = 0; + + memset(kbuf, 0, 10); + + c = scnprintf(kbuf, 10, "%d", release_wlocks); + + if (copy_to_user(ubuf, kbuf, c)) + return -EFAULT; + + return c; +} +static ssize_t debug_write_release_wlocks(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + char kbuf[10]; + long temp; + + memset(kbuf, 0, 10); + + if (copy_from_user(kbuf, buf, count > 10 ? 10 : count)) + return -EFAULT; + + if (strict_strtol(kbuf, 10, &temp)) + return -EINVAL; + + if (temp) + release_wlocks = 1; + else + release_wlocks = 0; + + return count; +} +static int debug_wake_lock_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +const struct file_operations debug_wlocks_ops = { + .open = debug_wake_lock_open, + .read = debug_read_release_wlocks, + .write = debug_write_release_wlocks, +}; + +static ssize_t debug_reprime_ep(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + struct msm_endpoint *ept; + char kbuf[10]; + unsigned int ep_num, dir; + unsigned long flags; + unsigned n, i; + + memset(kbuf, 0, 10); + + if (copy_from_user(kbuf, ubuf, count > 10 ? 10 : count)) + return -EFAULT; + + if (sscanf(kbuf, "%u %u", &ep_num, &dir) != 2) + return -EINVAL; + + if (dir) + i = ep_num + 16; + else + i = ep_num; + + spin_lock_irqsave(&ui->lock, flags); + ept = ui->ept + i; + n = 1 << ept->bit; + + if ((readl_relaxed(USB_ENDPTPRIME) & n)) + goto out; + + if (readl_relaxed(USB_ENDPTSTAT) & n) + goto out; + + /* clear speculative loads on item->info */ + rmb(); + if (ept->req && (ept->req->item->info & INFO_ACTIVE)) { + pr_err("%s(): ept%d%s prime failed. ept: config: %x" + "active: %x next: %x info: %x\n", + __func__, ept->num, + ept->flags & EPT_FLAG_IN ? 
"in" : "out", + ept->head->config, ept->head->active, + ept->head->next, ept->head->info); + writel_relaxed(n, USB_ENDPTPRIME); + } +out: + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} + +static char buffer[512]; +static ssize_t debug_prime_fail_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct usb_info *ui = file->private_data; + char *buf = buffer; + unsigned long flags; + struct msm_endpoint *ept; + int n; + int i = 0; + + spin_lock_irqsave(&ui->lock, flags); + for (n = 0; n < 32; n++) { + ept = ui->ept + n; + if (ept->ep.maxpacket == 0) + continue; + + i += scnprintf(buf + i, PAGE_SIZE - i, + "ept%d %s false_prime_count=%lu prime_fail_count=%d dtd_fail_count=%lu\n", + ept->num, (ept->flags & EPT_FLAG_IN) ? "in " : "out", + ept->false_prime_fail_count, + ept->actual_prime_fail_count, + ept->dTD_update_fail_count); + } + + i += scnprintf(buf + i, PAGE_SIZE - i, + "dTD_update_fail count: %lu\n", + ui->dTD_update_fail_count); + + i += scnprintf(buf + i, PAGE_SIZE - i, + "prime_fail count: %d\n", ui->prime_fail_count); + + spin_unlock_irqrestore(&ui->lock, flags); + + return simple_read_from_buffer(ubuf, count, ppos, buf, i); +} + +static int debug_prime_fail_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +const struct file_operations prime_fail_ops = { + .open = debug_prime_fail_open, + .read = debug_prime_fail_read, + .write = debug_reprime_ep, +}; + +static void usb_debugfs_init(struct usb_info *ui) +{ + struct dentry *dent; + dent = debugfs_create_dir(dev_name(&ui->pdev->dev), 0); + if (IS_ERR(dent)) + return; + + debugfs_create_file("status", 0444, dent, ui, &debug_stat_ops); + debugfs_create_file("reset", 0222, dent, ui, &debug_reset_ops); + debugfs_create_file("cycle", 0222, dent, ui, &debug_cycle_ops); + debugfs_create_file("release_wlocks", 0666, dent, ui, + &debug_wlocks_ops); + debugfs_create_file("prime_fail_countt", 0666, dent, ui, + &prime_fail_ops); +} +#else +static void usb_debugfs_init(struct usb_info *ui) {} +#endif + +static int +msm72k_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned char ep_type = + desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + + _ep->maxpacket = le16_to_cpu(desc->wMaxPacketSize); + config_ept(ept); + ept->wedged = 0; + usb_ept_enable(ept, 1, ep_type); + return 0; +} + +static int msm72k_disable(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + usb_ept_enable(ept, 0, 0); + flush_endpoint(ept); + return 0; +} + +static struct usb_request * +msm72k_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) +{ + return usb_ept_alloc_req(to_msm_endpoint(_ep), 0, gfp_flags); +} + +static void +msm72k_free_request(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_request *req = to_msm_request(_req); + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + + /* request should not be busy */ + BUG_ON(req->busy); + if (req->alloced) + kfree(req->req.buf); + dma_pool_free(ui->pool, req->item, req->item_dma); + kfree(req); +} + +static int +msm72k_queue(struct usb_ep *_ep, struct usb_request *req, gfp_t gfp_flags) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct usb_info *ui = ep->ui; + + if (ep == &ui->ep0in) { + struct msm_request *r = to_msm_request(req); + if (!req->length) + goto ep_queue_done; + r->gadget_complete = req->complete; + /* ep0_queue_ack_complete queue a receive for ACK 
before + ** calling req->complete + */ + req->complete = ep0_queue_ack_complete; + if (atomic_read(&ui->ep0_dir) == USB_DIR_OUT) + ep = &ui->ep0out; + goto ep_queue_done; + } + +ep_queue_done: + return usb_ept_queue_xfer(ep, req); +} + +static int msm72k_dequeue(struct usb_ep *_ep, struct usb_request *_req) +{ + struct msm_endpoint *ep = to_msm_endpoint(_ep); + struct msm_request *req = to_msm_request(_req); + struct usb_info *ui = ep->ui; + + struct msm_request *temp_req; + unsigned long flags; + + if (!(ui && req && ep->req)) + return -EINVAL; + + spin_lock_irqsave(&ui->lock, flags); + if (!req->busy) { + dev_dbg(&ui->pdev->dev, "%s: !req->busy\n", __func__); + spin_unlock_irqrestore(&ui->lock, flags); + return -EINVAL; + } + del_timer(&ep->prime_timer); + /* Stop the transfer */ + do { + writel((1 << ep->bit), USB_ENDPTFLUSH); + while (readl(USB_ENDPTFLUSH) & (1 << ep->bit)) + udelay(100); + } while (readl(USB_ENDPTSTAT) & (1 << ep->bit)); + + req->req.status = 0; + req->busy = 0; + + if (ep->req == req) { + ep->req = req->next; + ep->head->next = req->item->next; + } else { + req->prev->next = req->next; + if (req->next) + req->next->prev = req->prev; + req->prev->item->next = req->item->next; + } + + if (!req->next) + ep->last = req->prev; + + /* initialize request to default */ + req->item->next = TERMINATE; + req->item->info = 0; + req->live = 0; + dma_unmap_single(NULL, req->dma, req->req.length, + (ep->flags & EPT_FLAG_IN) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); + + if (req->req.complete) { + req->req.status = -ECONNRESET; + spin_unlock_irqrestore(&ui->lock, flags); + req->req.complete(&ep->ep, &req->req); + spin_lock_irqsave(&ui->lock, flags); + } + + if (!req->live) { + /* Reprime the endpoint for the remaining transfers */ + for (temp_req = ep->req ; temp_req ; temp_req = temp_req->next) + temp_req->live = 0; + if (ep->req) + usb_ept_start(ep); + spin_unlock_irqrestore(&ui->lock, flags); + return 0; + } + spin_unlock_irqrestore(&ui->lock, flags); + return 0; +} + +static int +usb_ept_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + struct usb_info *ui = ept->ui; + unsigned int in = ept->flags & EPT_FLAG_IN; + unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + + n = readl(USB_ENDPTCTRL(ept->num)); + + if (in) { + if (value) + n |= CTRL_TXS; + else { + n &= ~CTRL_TXS; + n |= CTRL_TXR; + } + } else { + if (value) + n |= CTRL_RXS; + else { + n &= ~CTRL_RXS; + n |= CTRL_RXR; + } + } + writel(n, USB_ENDPTCTRL(ept->num)); + if (!value) + ept->wedged = 0; + spin_unlock_irqrestore(&ui->lock, flags); + + return 0; +} + +static int +msm72k_set_halt(struct usb_ep *_ep, int value) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + unsigned int in = ept->flags & EPT_FLAG_IN; + + if (value && in && ept->req) + return -EAGAIN; + + usb_ept_set_halt(_ep, value); + + return 0; +} + +static int +msm72k_fifo_status(struct usb_ep *_ep) +{ + return -EOPNOTSUPP; +} + +static void +msm72k_fifo_flush(struct usb_ep *_ep) +{ + flush_endpoint(to_msm_endpoint(_ep)); +} +static int msm72k_set_wedge(struct usb_ep *_ep) +{ + struct msm_endpoint *ept = to_msm_endpoint(_ep); + + if (ept->num == 0) + return -EINVAL; + + ept->wedged = 1; + + return msm72k_set_halt(_ep, 1); +} + +static const struct usb_ep_ops msm72k_ep_ops = { + .enable = msm72k_enable, + .disable = msm72k_disable, + + .alloc_request = msm72k_alloc_request, + .free_request = msm72k_free_request, + + .queue = msm72k_queue, + .dequeue = msm72k_dequeue, + + .set_halt = 
msm72k_set_halt, + .set_wedge = msm72k_set_wedge, + .fifo_status = msm72k_fifo_status, + .fifo_flush = msm72k_fifo_flush, +}; + +static int msm72k_get_frame(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + + /* frame number is in bits 13:3 */ + return (readl(USB_FRINDEX) >> 3) & 0x000007FF; +} + +/* VBUS reporting logically comes from a transceiver */ +static int msm72k_udc_vbus_session(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + if (is_active || atomic_read(&otg->chg_type) + == USB_CHG_TYPE__WALLCHARGER) + wake_lock(&ui->wlock); + + msm_hsusb_set_vbus_state(is_active); + return 0; +} + +/* SW workarounds +Issue #1 - USB Spoof Disconnect Failure +Symptom - Writing 0 to run/stop bit of USBCMD doesn't cause disconnect +SW workaround - Making opmode non-driving and SuspendM set in function + register of SMSC phy +*/ +/* drivers may have software control over D+ pullup */ +static int msm72k_pullup_internal(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + if (is_active) { + spin_lock_irqsave(&ui->lock, flags); + if (is_usb_online(ui) && ui->driver) + writel(readl(USB_USBCMD) | USBCMD_RS, USB_USBCMD); + spin_unlock_irqrestore(&ui->lock, flags); + } else { + writel(readl(USB_USBCMD) & ~USBCMD_RS, USB_USBCMD); + /* S/W workaround, Issue#1 */ + otg_io_write(ui->xceiv, 0x48, 0x04); + } + + /* Ensure pull-up operation is completed before returning */ + mb(); + + return 0; +} + +static int msm72k_pullup(struct usb_gadget *_gadget, int is_active) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + unsigned long flags; + + atomic_set(&ui->softconnect, is_active); + + spin_lock_irqsave(&ui->lock, flags); + if (ui->usb_state == USB_STATE_NOTATTACHED || ui->driver == NULL || + atomic_read(&otg->chg_type) == USB_CHG_TYPE__WALLCHARGER) { + spin_unlock_irqrestore(&ui->lock, flags); + return 0; + } + spin_unlock_irqrestore(&ui->lock, flags); + + msm72k_pullup_internal(_gadget, is_active); + + if (is_active && !ui->gadget.is_a_peripheral) + schedule_delayed_work(&ui->chg_det, USB_CHG_DET_DELAY); + + return 0; +} + +static int msm72k_wakeup(struct usb_gadget *_gadget) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + struct msm_otg *otg = to_msm_otg(ui->xceiv); + + if (!atomic_read(&ui->remote_wakeup)) { + dev_err(&ui->pdev->dev, + "%s: remote wakeup not supported\n", __func__); + return -ENOTSUPP; + } + + if (!atomic_read(&ui->configured)) { + dev_err(&ui->pdev->dev, + "%s: device is not configured\n", __func__); + return -ENODEV; + } + otg_set_suspend(ui->xceiv, 0); + + disable_irq(otg->irq); + + if (!is_usb_active()) + writel(readl(USB_PORTSC) | PORTSC_FPR, USB_PORTSC); + + /* Ensure that USB port is resumed before enabling the IRQ */ + mb(); + + enable_irq(otg->irq); + + return 0; +} + +/* when Gadget is configured, it will indicate how much power + * can be pulled from vbus, as specified in configuiration descriptor + */ +static int msm72k_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + + + spin_lock_irqsave(&ui->lock, flags); + ui->b_max_pow = mA; + ui->flags = USB_FLAG_CONFIGURED; + spin_unlock_irqrestore(&ui->lock, flags); + + 
schedule_work(&ui->work); + + return 0; +} + +static int msm72k_set_selfpowered(struct usb_gadget *_gadget, int set) +{ + struct usb_info *ui = container_of(_gadget, struct usb_info, gadget); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ui->lock, flags); + if (set) { + if (ui->pdata && ui->pdata->self_powered) + atomic_set(&ui->self_powered, 1); + else + ret = -EOPNOTSUPP; + } else { + /* We can always work as a bus powered device */ + atomic_set(&ui->self_powered, 0); + } + spin_unlock_irqrestore(&ui->lock, flags); + + return ret; + +} + +static const struct usb_gadget_ops msm72k_ops = { + .get_frame = msm72k_get_frame, + .vbus_session = msm72k_udc_vbus_session, + .vbus_draw = msm72k_udc_vbus_draw, + .pullup = msm72k_pullup, + .wakeup = msm72k_wakeup, + .set_selfpowered = msm72k_set_selfpowered, +}; + +static void usb_do_remote_wakeup(struct work_struct *w) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); +} + +static ssize_t usb_remote_wakeup(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + + msm72k_wakeup(&ui->gadget); + + return count; +} + +static ssize_t show_usb_state(struct device *dev, struct device_attribute *attr, + char *buf) +{ + size_t i; + char *state[] = {"USB_STATE_NOTATTACHED", "USB_STATE_ATTACHED", + "USB_STATE_POWERED", "USB_STATE_UNAUTHENTICATED", + "USB_STATE_RECONNECTING", "USB_STATE_DEFAULT", + "USB_STATE_ADDRESS", "USB_STATE_CONFIGURED", + "USB_STATE_SUSPENDED" + }; + + i = scnprintf(buf, PAGE_SIZE, "%s\n", state[msm_hsusb_get_state()]); + return i; +} + +static ssize_t show_usb_speed(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t i; + char *speed[] = {"USB_SPEED_UNKNOWN", "USB_SPEED_LOW", + "USB_SPEED_FULL", "USB_SPEED_HIGH"}; + + i = scnprintf(buf, PAGE_SIZE, "%s\n", speed[ui->gadget.speed]); + return i; +} + +static ssize_t store_usb_chg_current(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + unsigned long mA; + + if (ui->gadget.is_a_peripheral) + return -EINVAL; + + if (strict_strtoul(buf, 10, &mA)) + return -EINVAL; + + ui->chg_current = mA; + otg_set_power(ui->xceiv, mA); + + return count; +} + +static ssize_t show_usb_chg_current(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t count; + + count = snprintf(buf, PAGE_SIZE, "%d", ui->chg_current); + + return count; +} + +static ssize_t show_usb_chg_type(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + struct msm_otg *otg = to_msm_otg(ui->xceiv); + size_t count; + char *chg_type[] = {"STD DOWNSTREAM PORT", + "CARKIT", + "DEDICATED CHARGER", + "INVALID"}; + + count = snprintf(buf, PAGE_SIZE, "%s", + chg_type[atomic_read(&otg->chg_type)]); + + return count; +} +static DEVICE_ATTR(wakeup, S_IWUSR, 0, usb_remote_wakeup); +static DEVICE_ATTR(usb_state, S_IRUSR, show_usb_state, 0); +static DEVICE_ATTR(usb_speed, S_IRUSR, show_usb_speed, 0); +static DEVICE_ATTR(chg_type, S_IRUSR, show_usb_chg_type, 0); +static DEVICE_ATTR(chg_current, S_IWUSR | S_IRUSR, + show_usb_chg_current, store_usb_chg_current); + +#ifdef CONFIG_USB_OTG +static ssize_t store_host_req(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb_info *ui = the_usb_info; + unsigned long val, flags; + + if (strict_strtoul(buf, 10, 
&val)) + return -EINVAL; + + dev_dbg(&ui->pdev->dev, "%s host request\n", + val ? "set" : "clear"); + + spin_lock_irqsave(&ui->lock, flags); + if (ui->hnp_avail) + ui->gadget.host_request = !!val; + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} +static DEVICE_ATTR(host_request, S_IWUSR, NULL, store_host_req); + +/* How do we notify user space about HNP availability? + * As we are compliant to Rev 2.0, Host will not set a_hnp_support. + * Introduce hnp_avail flag and set when HNP polling request arrives. + * The expectation is that user space checks hnp availability before + * requesting host role via above sysfs node. + */ +static ssize_t show_host_avail(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb_info *ui = the_usb_info; + size_t count; + unsigned long flags; + + spin_lock_irqsave(&ui->lock, flags); + count = snprintf(buf, PAGE_SIZE, "%d\n", ui->hnp_avail); + spin_unlock_irqrestore(&ui->lock, flags); + + return count; +} +static DEVICE_ATTR(host_avail, S_IRUSR, show_host_avail, NULL); + +static struct attribute *otg_attrs[] = { + &dev_attr_host_request.attr, + &dev_attr_host_avail.attr, + NULL, +}; + +static struct attribute_group otg_attr_grp = { + .name = "otg", + .attrs = otg_attrs, +}; +#endif + +static int msm72k_probe(struct platform_device *pdev) +{ + struct usb_info *ui; + struct msm_otg *otg; + int retval; + + dev_dbg(&pdev->dev, "msm72k_probe\n"); + ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL); + if (!ui) + return -ENOMEM; + + ui->pdev = pdev; + ui->pdata = pdev->dev.platform_data; + + ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL); + if (!ui->buf) + return usb_free(ui, -ENOMEM); + + ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0); + if (!ui->pool) + return usb_free(ui, -ENOMEM); + + ui->xceiv = otg_get_transceiver(); + if (!ui->xceiv) + return usb_free(ui, -ENODEV); + + otg = to_msm_otg(ui->xceiv); + ui->addr = otg->regs; + + ui->gadget.ops = &msm72k_ops; + ui->gadget.is_dualspeed = 1; + device_initialize(&ui->gadget.dev); + dev_set_name(&ui->gadget.dev, "gadget"); + ui->gadget.dev.parent = &pdev->dev; + ui->gadget.dev.dma_mask = pdev->dev.dma_mask; + +#ifdef CONFIG_USB_OTG + ui->gadget.is_otg = 1; +#endif + + ui->sdev.name = DRIVER_NAME; + ui->sdev.print_name = print_switch_name; + ui->sdev.print_state = print_switch_state; + + retval = switch_dev_register(&ui->sdev); + if (retval) + return usb_free(ui, retval); + + the_usb_info = ui; + + wake_lock_init(&ui->wlock, + WAKE_LOCK_SUSPEND, "usb_bus_active"); + + usb_debugfs_init(ui); + + usb_prepare(ui); + +#ifdef CONFIG_USB_OTG + retval = sysfs_create_group(&pdev->dev.kobj, &otg_attr_grp); + if (retval) { + dev_err(&ui->pdev->dev, + "failed to create otg sysfs directory:" + "err:(%d)\n", retval); + } +#endif + + retval = otg_set_peripheral(ui->xceiv, &ui->gadget); + if (retval) { + dev_err(&ui->pdev->dev, + "%s: Cannot bind the transceiver, retval:(%d)\n", + __func__, retval); + switch_dev_unregister(&ui->sdev); + wake_lock_destroy(&ui->wlock); + return usb_free(ui, retval); + } + + pm_runtime_enable(&pdev->dev); + + /* Setup phy stuck timer */ + if (ui->pdata && ui->pdata->is_phy_status_timer_on) + setup_timer(&phy_status_timer, usb_phy_status_check_timer, 0); + return 0; +} + +int usb_gadget_probe_driver(struct usb_gadget_driver *driver, + int (*bind)(struct usb_gadget *)) +{ + struct usb_info *ui = the_usb_info; + int retval, n; + + if (!driver + || driver->speed < USB_SPEED_FULL + || !bind + || !driver->disconnect + || !driver->setup) + 
return -EINVAL; + if (!ui) + return -ENODEV; + if (ui->driver) + return -EBUSY; + + /* first hook up the driver ... */ + ui->driver = driver; + ui->gadget.dev.driver = &driver->driver; + ui->gadget.name = driver_name; + INIT_LIST_HEAD(&ui->gadget.ep_list); + ui->gadget.ep0 = &ui->ep0in.ep; + INIT_LIST_HEAD(&ui->gadget.ep0->ep_list); + ui->gadget.speed = USB_SPEED_UNKNOWN; + atomic_set(&ui->softconnect, 1); + + for (n = 1; n < 16; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + for (n = 17; n < 32; n++) { + struct msm_endpoint *ept = ui->ept + n; + list_add_tail(&ept->ep.ep_list, &ui->gadget.ep_list); + ept->ep.maxpacket = 512; + } + + retval = device_add(&ui->gadget.dev); + if (retval) + goto fail; + + retval = bind(&ui->gadget); + if (retval) { + dev_err(&ui->pdev->dev, "bind to driver %s --> error %d\n", + driver->driver.name, retval); + device_del(&ui->gadget.dev); + goto fail; + } + + retval = device_create_file(&ui->gadget.dev, &dev_attr_wakeup); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + "(wakeup) error: (%d)\n", retval); + retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_state); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + " (usb_state) error: (%d)\n", retval); + + retval = device_create_file(&ui->gadget.dev, &dev_attr_usb_speed); + if (retval != 0) + dev_err(&ui->pdev->dev, "failed to create sysfs entry:" + " (usb_speed) error: (%d)\n", retval); + + retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_type); + if (retval != 0) + dev_err(&ui->pdev->dev, + "failed to create sysfs entry(chg_type): err:(%d)\n", + retval); + retval = device_create_file(&ui->gadget.dev, &dev_attr_chg_current); + if (retval != 0) + dev_err(&ui->pdev->dev, + "failed to create sysfs entry(chg_current):" + "err:(%d)\n", retval); + + dev_dbg(&ui->pdev->dev, "registered gadget driver '%s'\n", + driver->driver.name); + usb_start(ui); + + return 0; + +fail: + ui->driver = NULL; + ui->gadget.dev.driver = NULL; + return retval; +} +EXPORT_SYMBOL(usb_gadget_probe_driver); + +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) +{ + struct usb_info *dev = the_usb_info; + + if (!dev) + return -ENODEV; + if (!driver || driver != dev->driver || !driver->unbind) + return -EINVAL; + + msm72k_pullup_internal(&dev->gadget, 0); + if (dev->irq) { + free_irq(dev->irq, dev); + dev->irq = 0; + } + dev->state = USB_STATE_IDLE; + atomic_set(&dev->configured, 0); + switch_set_state(&dev->sdev, 0); + /* cancel pending ep0 transactions */ + flush_endpoint(&dev->ep0out); + flush_endpoint(&dev->ep0in); + + device_remove_file(&dev->gadget.dev, &dev_attr_wakeup); + device_remove_file(&dev->gadget.dev, &dev_attr_usb_state); + device_remove_file(&dev->gadget.dev, &dev_attr_usb_speed); + device_remove_file(&dev->gadget.dev, &dev_attr_chg_type); + device_remove_file(&dev->gadget.dev, &dev_attr_chg_current); + driver->disconnect(&dev->gadget); + driver->unbind(&dev->gadget); + dev->gadget.dev.driver = NULL; + dev->driver = NULL; + + device_del(&dev->gadget.dev); + + dev_dbg(&dev->pdev->dev, + "unregistered gadget driver '%s'\n", driver->driver.name); + return 0; +} +EXPORT_SYMBOL(usb_gadget_unregister_driver); + + +static int msm72k_udc_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending...\n"); + return 0; +} + +static int msm72k_udc_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming...\n"); + return 0; 
+} + +static int msm72k_udc_runtime_idle(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: idling...\n"); + return 0; +} + +static struct dev_pm_ops msm72k_udc_dev_pm_ops = { + .runtime_suspend = msm72k_udc_runtime_suspend, + .runtime_resume = msm72k_udc_runtime_resume, + .runtime_idle = msm72k_udc_runtime_idle +}; + +static struct platform_driver usb_driver = { + .probe = msm72k_probe, + .driver = { .name = "msm_hsusb", + .pm = &msm72k_udc_dev_pm_ops, }, +}; + +static int __init init(void) +{ + return platform_driver_register(&usb_driver); +} +module_init(init); + +static void __exit cleanup(void) +{ + platform_driver_unregister(&usb_driver); +} +module_exit(cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("Mike Lockwood, Brian Swetland"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c index 68dbcc3e4cc..1852c8a20c3 100644 --- a/drivers/usb/gadget/pch_udc.c +++ b/drivers/usb/gadget/pch_udc.c @@ -320,6 +320,7 @@ struct pch_udc_ep { * @registered: driver regsitered with system * @suspended: driver in suspended state * @connected: gadget driver associated + * @vbus_session: required vbus_session state * @set_cfg_not_acked: pending acknowledgement 4 setup * @waiting_zlp_ack: pending acknowledgement 4 ZLP * @data_requests: DMA pool for data requests @@ -346,6 +347,7 @@ struct pch_udc_dev { registered:1, suspended:1, connected:1, + vbus_session:1, set_cfg_not_acked:1, waiting_zlp_ack:1; struct pci_pool *data_requests; @@ -363,6 +365,7 @@ struct pch_udc_dev { #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808 #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D +#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808 static const char ep0_string[] = "ep0in"; static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */ @@ -561,6 +564,29 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev) pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); } +/** + * pch_udc_reconnect() - This API initializes usb device controller, + * and clear the disconnect status. + * @dev: Reference to pch_udc_regs structure + */ +static void pch_udc_init(struct pch_udc_dev *dev); +static void pch_udc_reconnect(struct pch_udc_dev *dev) +{ + pch_udc_init(dev); + + /* enable device interrupts */ + /* pch_udc_enable_interrupts() */ + pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, + UDC_DEVINT_UR | UDC_DEVINT_ENUM); + + /* Clear the disconnect */ + pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); + pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD); + mdelay(1); + /* Resume USB signalling */ + pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES); +} + /** * pch_udc_vbus_session() - set or clearr the disconnect status. 
* @dev: Reference to pch_udc_regs structure @@ -571,10 +597,18 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev) static inline void pch_udc_vbus_session(struct pch_udc_dev *dev, int is_active) { - if (is_active) - pch_udc_clear_disconnect(dev); - else + if (is_active) { + pch_udc_reconnect(dev); + dev->vbus_session = 1; + } else { + if (dev->driver && dev->driver->disconnect) { + spin_unlock(&dev->lock); + dev->driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } pch_udc_set_disconnect(dev); + dev->vbus_session = 0; + } } /** @@ -1134,7 +1168,17 @@ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on) if (!gadget) return -EINVAL; dev = container_of(gadget, struct pch_udc_dev, gadget); - pch_udc_vbus_session(dev, is_on); + if (is_on) { + pch_udc_reconnect(dev); + } else { + if (dev->driver && dev->driver->disconnect) { + spin_unlock(&dev->lock); + dev->driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + pch_udc_set_disconnect(dev); + } + return 0; } @@ -2338,8 +2382,11 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev) /* Complete request queue */ empty_req_queue(ep); } - if (dev->driver && dev->driver->disconnect) + if (dev->driver && dev->driver->disconnect) { + spin_unlock(&dev->lock); dev->driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } } /** @@ -2374,6 +2421,11 @@ static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev) pch_udc_set_dma(dev, DMA_DIR_TX); pch_udc_set_dma(dev, DMA_DIR_RX); pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX])); + + /* enable device interrupts */ + pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US | + UDC_DEVINT_ES | UDC_DEVINT_ENUM | + UDC_DEVINT_SI | UDC_DEVINT_SC); } /** @@ -2475,8 +2527,24 @@ static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr) if (dev_intr & UDC_DEVINT_SC) pch_udc_svc_cfg_interrupt(dev); /* USB Suspend interrupt */ - if (dev_intr & UDC_DEVINT_US) + if (dev_intr & UDC_DEVINT_US) { + if (dev->driver + && dev->driver->suspend) { + spin_unlock(&dev->lock); + dev->driver->suspend(&dev->gadget); + spin_lock(&dev->lock); + } + + if (dev->vbus_session == 0) { + if (dev->driver && dev->driver->disconnect) { + spin_unlock(&dev->lock); + dev->driver->disconnect(&dev->gadget); + spin_lock(&dev->lock); + } + pch_udc_reconnect(dev); + } dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n"); + } /* Clear the SOF interrupt, if enabled */ if (dev_intr & UDC_DEVINT_SOF) dev_dbg(&dev->pdev->dev, "SOF\n"); @@ -2502,6 +2570,14 @@ static irqreturn_t pch_udc_isr(int irq, void *pdev) dev_intr = pch_udc_read_device_interrupts(dev); ep_intr = pch_udc_read_ep_interrupts(dev); + /* For a hot plug, this find that the controller is hung up. 
*/ + if (dev_intr == ep_intr) + if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) { + dev_dbg(&dev->pdev->dev, "UDC: Hung up\n"); + /* The controller is reset */ + pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR); + return IRQ_HANDLED; + } if (dev_intr) /* Clear device interrupts */ pch_udc_write_device_interrupts(dev, dev_intr); @@ -2915,8 +2991,10 @@ static int pch_udc_probe(struct pci_dev *pdev, } pch_udc = dev; /* initialize the hardware */ - if (pch_udc_pcd_init(dev)) + if (pch_udc_pcd_init(dev)) { + retval = -ENODEV; goto finished; + } if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME, dev)) { dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__, @@ -2971,6 +3049,11 @@ static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = { .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, .class_mask = 0xffffffff, }, + { + PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC), + .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe, + .class_mask = 0xffffffff, + }, { 0 }, }; diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 88a464cc96c..271ef94668e 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -1602,7 +1602,7 @@ cleanup(void) if (status) ERROR(dev, "usb_gadget_unregister_driver %x\n", status); - unregister_chrdev_region(g_printer_devno, 1); + unregister_chrdev_region(g_printer_devno, 2); class_destroy(usb_gadget_class); mutex_unlock(&usb_printer_gadget.lock_printer_io); } diff --git a/drivers/usb/gadget/qcom_maemo.c b/drivers/usb/gadget/qcom_maemo.c new file mode 100644 index 00000000000..39686c4e05c --- /dev/null +++ b/drivers/usb/gadget/qcom_maemo.c @@ -0,0 +1,304 @@ +/* + * Qualcomm Maemo Composite driver + * + * Copyright (C) 2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2009 Samsung Electronics + * Copyright (c) 2010, Code Aurora Forum. All rights reserved. + * + * This program from the Code Aurora Forum is free software; you can + * redistribute it and/or modify it under the GNU General Public License + * version 2 and only version 2 as published by the Free Software Foundation. + * The original work available from [git.kernel.org ] is subject to the + * notice below. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include + + +#define DRIVER_DESC "Qcom Maemo Composite Gadget" +#define VENDOR_ID 0x05c6 +#define PRODUCT_ID 0x902E + +/* + * kbuild is not very cooperative with respect to linking separately + * compiled library objects into one module. So for now we won't use + * separate compilation ... ensuring init/exit sections work to shrink + * the runtime footprint, and giving us at least some parts of what + * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
+ */ + +#include "composite.c" +#include "usbstring.c" +#include "config.c" +#include "epautoconf.c" + +#define USB_ETH + +#define USB_ETH_RNDIS +#ifdef USB_ETH_RNDIS +# include "f_rndis.c" +# include "rndis.c" +#endif + + +#include "u_serial.c" +#include "f_serial.c" + +#include "u_ether.c" + +#undef DBG /* u_ether.c has broken idea about macros */ +#undef VDBG /* so clean up after it */ +#undef ERROR +#undef INFO + +#include "f_mass_storage.c" +#include "f_diag.c" +#include "f_rmnet.c" + +/*-------------------------------------------------------------------------*/ +/* string IDs are assigned dynamically */ + +#define STRING_MANUFACTURER_IDX 0 +#define STRING_PRODUCT_IDX 1 +#define STRING_SERIAL_IDX 2 + +/* String Table */ +static struct usb_string strings_dev[] = { + /* These dummy values should be overridden by platform data */ + [STRING_MANUFACTURER_IDX].s = "Qualcomm Incorporated", + [STRING_PRODUCT_IDX].s = "Usb composition", + [STRING_SERIAL_IDX].s = "0123456789ABCDEF", + { } /* end of list */ +}; + +static struct usb_gadget_strings stringtab_dev = { + .language = 0x0409, /* en-us */ + .strings = strings_dev, +}; + +static struct usb_gadget_strings *dev_strings[] = { + &stringtab_dev, + NULL, +}; + +static struct usb_device_descriptor device_desc = { + .bLength = sizeof(device_desc), + .bDescriptorType = USB_DT_DEVICE, + .bcdUSB = __constant_cpu_to_le16(0x0200), + .bDeviceClass = USB_CLASS_PER_INTERFACE, + .bDeviceSubClass = 0, + .bDeviceProtocol = 0, + .idVendor = __constant_cpu_to_le16(VENDOR_ID), + .idProduct = __constant_cpu_to_le16(PRODUCT_ID), + .bcdDevice = __constant_cpu_to_le16(0xffff), + .bNumConfigurations = 1, +}; + +static u8 hostaddr[ETH_ALEN]; +static struct usb_diag_ch *diag_ch; +static struct usb_diag_platform_data usb_diag_pdata = { + .ch_name = DIAG_LEGACY, +}; + +/****************************** Configurations ******************************/ +static struct fsg_module_parameters mod_data = { + .stall = 0 +}; +FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); + +static struct fsg_common *fsg_common; +static int maemo_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl); + +static int maemo_do_config(struct usb_configuration *c) +{ + int ret; + + ret = rndis_bind_config(c, hostaddr); + if (ret < 0) + return ret; + + ret = diag_function_add(c); + if (ret < 0) + return ret; + + ret = gser_bind_config(c, 0); + if (ret < 0) + return ret; + + ret = gser_bind_config(c, 1); + if (ret < 0) + return ret; + + ret = rmnet_function_add(c); + if (ret < 0) + return ret; + + ret = fsg_add(c->cdev, c, fsg_common); + if (ret < 0) + return ret; + + return 0; +} + +static struct usb_configuration maemo_config_driver = { + .label = "Qcom Maemo Gadget", + .bind = maemo_do_config, + .setup = maemo_setup_config, + .bConfigurationValue = 1, + .bMaxPower = 0xFA, +}; +static int maemo_setup_config(struct usb_configuration *c, + const struct usb_ctrlrequest *ctrl) +{ + int i; + int ret = -EOPNOTSUPP; + + for (i = 0; i < maemo_config_driver.next_interface_id; i++) { + if (maemo_config_driver.interface[i]->setup) { + ret = maemo_config_driver.interface[i]->setup( + maemo_config_driver.interface[i], ctrl); + if (ret >= 0) + return ret; + } + } + + return ret; +} + +static int maemo_bind(struct usb_composite_dev *cdev) +{ + struct usb_gadget *gadget = cdev->gadget; + int status, gcnum; + + /* set up diag channel */ + diag_ch = diag_setup(&usb_diag_pdata); + if (IS_ERR(diag_ch)) + return PTR_ERR(diag_ch); + + /* set up network link layer */ + status = 
gether_setup(cdev->gadget, hostaddr); + if (status < 0) + goto diag_clean; + + /* set up serial link layer */ + status = gserial_setup(cdev->gadget, 2); + if (status < 0) + goto fail0; + + /* set up mass storage function */ + fsg_common = fsg_common_from_params(0, cdev, &mod_data); + if (IS_ERR(fsg_common)) { + status = PTR_ERR(fsg_common); + goto fail1; + } + + gcnum = usb_gadget_controller_number(gadget); + if (gcnum >= 0) + device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); + else { + /* gadget zero is so simple (for now, no altsettings) that + * it SHOULD NOT have problems with bulk-capable hardware. + * so just warn about unrcognized controllers -- don't panic. + * + * things like configuration and altsetting numbering + * can need hardware-specific attention though. + */ + WARNING(cdev, "controller '%s' not recognized\n", + gadget->name); + device_desc.bcdDevice = __constant_cpu_to_le16(0x9999); + } + + /* Allocate string descriptor numbers ... note that string + * contents can be overridden by the composite_dev glue. + */ + + status = usb_string_id(cdev); + if (status < 0) + goto fail2; + strings_dev[STRING_MANUFACTURER_IDX].id = status; + device_desc.iManufacturer = status; + + status = usb_string_id(cdev); + if (status < 0) + goto fail2; + strings_dev[STRING_PRODUCT_IDX].id = status; + device_desc.iProduct = status; + + if (!usb_gadget_set_selfpowered(gadget)) + maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_SELFPOWER; + + if (gadget->ops->wakeup) + maemo_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; + + /* register our first configuration */ + status = usb_add_config(cdev, &maemo_config_driver); + if (status < 0) + goto fail2; + + usb_gadget_set_selfpowered(gadget); + dev_info(&gadget->dev, DRIVER_DESC "\n"); + fsg_common_put(fsg_common); + return 0; + +fail2: + fsg_common_put(fsg_common); +fail1: + gserial_cleanup(); +fail0: + gether_cleanup(); +diag_clean: + diag_cleanup(diag_ch); + + return status; +} + +static int __exit maemo_unbind(struct usb_composite_dev *cdev) +{ + gserial_cleanup(); + gether_cleanup(); + diag_cleanup(diag_ch); + return 0; +} + +static struct usb_composite_driver qcom_maemo_driver = { + .name = "Qcom Maemo Gadget", + .dev = &device_desc, + .strings = dev_strings, + .bind = maemo_bind, + .unbind = __exit_p(maemo_unbind), +}; + +static int __init qcom_maemo_usb_init(void) +{ + return usb_composite_register(&qcom_maemo_driver); +} +module_init(qcom_maemo_usb_init); + +static void __exit qcom_maemo_usb_cleanup(void) +{ + usb_composite_unregister(&qcom_maemo_driver); +} +module_exit(qcom_maemo_usb_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("1.0"); diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 791cb0fa67d..2b994a5c3fa 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -605,12 +605,12 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf) resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION); resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS); resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3); - resp->MaxPacketsPerTransfer = cpu_to_le32(1); - resp->MaxTransferSize = cpu_to_le32( - params->dev->mtu + resp->MaxPacketsPerTransfer = cpu_to_le32(TX_SKB_HOLD_THRESHOLD); + resp->MaxTransferSize = cpu_to_le32(TX_SKB_HOLD_THRESHOLD * + (params->dev->mtu + sizeof(struct ethhdr) + sizeof(struct rndis_packet_msg_type) - + 22); + + 22)); resp->PacketAlignmentFactor = cpu_to_le32(0); resp->AFListOffset = cpu_to_le32(0); resp->AFListSize = 
cpu_to_le32(0); @@ -1166,15 +1166,11 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ -static bool rndis_initialized; int rndis_init(void) { u8 i; - if (rndis_initialized) - return 0; - for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { #ifdef CONFIG_USB_GADGET_DEBUG_FILES char name [20]; @@ -1201,7 +1197,6 @@ int rndis_init(void) INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); } - rndis_initialized = true; return 0; } @@ -1210,13 +1205,7 @@ void rndis_exit(void) #ifdef CONFIG_USB_GADGET_DEBUG_FILES u8 i; char name[20]; -#endif - if (!rndis_initialized) - return; - rndis_initialized = false; - -#ifdef CONFIG_USB_GADGET_DEBUG_FILES for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { sprintf(name, NAME_TEMPLATE, i); remove_proc_entry(name, NULL); diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index 680d0326c40..15059670b40 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -274,7 +274,12 @@ static struct fsg_lun *fsg_lun_from_dev(struct device *dev) #define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ /* Number of buffers for CBW, DATA and CSW */ -#define FSG_NUM_BUFFERS 8 +#ifdef CONFIG_USB_CSW_HACK +#define FSG_NUM_BUFFERS 4 +#else +#define FSG_NUM_BUFFERS 2 +#endif + /* Default size of buffer length. */ #define FSG_BUFLEN ((u32)16384) diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c index 70cbd2f44a1..f48e7f7f626 100644 --- a/drivers/usb/gadget/u_bam.c +++ b/drivers/usb/gadget/u_bam.c @@ -23,12 +23,18 @@ #include #include +#include +#include + #include "u_rmnet.h" #define BAM_N_PORTS 1 +#define BAM2BAM_N_PORTS 1 static struct workqueue_struct *gbam_wq; static int n_bam_ports; +static int n_bam2bam_ports; +static unsigned n_tx_req_queued; static unsigned bam_ch_ids[] = { 8 }; static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; @@ -45,6 +51,8 @@ static const char *bam_ch_names[] = { "bam_dmux_ch_8" }; #define BAM_MUX_TX_Q_SIZE 200 #define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR) +#define DL_INTR_THRESHOLD 20 + unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD; module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); @@ -66,8 +74,16 @@ module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR); unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE; module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR); +unsigned int dl_intr_threshold = DL_INTR_THRESHOLD; +module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR); + #define BAM_CH_OPENED BIT(0) #define BAM_CH_READY BIT(1) +#define SPS_PARAMS_PIPE_ID_MASK (0x1F) +#define SPS_PARAMS_SPS_MODE BIT(5) +#define SPS_PARAMS_TBE BIT(6) +#define MSM_VENDOR_ID BIT(16) + struct bam_ch_info { unsigned long flags; unsigned id; @@ -80,6 +96,14 @@ struct bam_ch_info { struct gbam_port *port; struct work_struct write_tobam_w; + struct work_struct write_tohost_w; + + struct usb_request *rx_req; + struct usb_request *tx_req; + + u8 src_pipe_idx; + u8 dst_pipe_idx; + u8 connection_idx; /* stats */ unsigned int pending_with_bam; @@ -93,9 +117,11 @@ struct bam_ch_info { struct gbam_port { unsigned port_num; - spinlock_t port_lock; + spinlock_t port_lock_ul; + spinlock_t port_lock_dl; struct grmnet *port_usb; + struct grmnet *gr; struct bam_ch_info data_ch; @@ -108,7 +134,10 @@ static struct bam_portmaster { struct platform_driver pdrv; } bam_ports[BAM_N_PORTS]; +struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS]; static void gbam_start_rx(struct 
gbam_port *port); +static void gbam_start_endless_rx(struct gbam_port *port); +static void gbam_start_endless_tx(struct gbam_port *port); /*---------------misc functions---------------- */ static void gbam_free_requests(struct usb_ep *ep, struct list_head *head) @@ -157,9 +186,9 @@ static void gbam_write_data_tohost(struct gbam_port *port) struct usb_request *req; struct usb_ep *ep; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } @@ -168,7 +197,7 @@ static void gbam_write_data_tohost(struct gbam_port *port) while (!list_empty(&d->tx_idle)) { skb = __skb_dequeue(&d->tx_skb_q); if (!skb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); return; } req = list_first_entry(&d->tx_idle, @@ -177,12 +206,19 @@ static void gbam_write_data_tohost(struct gbam_port *port) req->context = skb; req->buf = skb->data; req->length = skb->len; + n_tx_req_queued++; + if (n_tx_req_queued == dl_intr_threshold) { + req->no_interrupt = 0; + n_tx_req_queued = 0; + } else { + req->no_interrupt = 1; + } list_del(&req->list); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_dl); ret = usb_ep_queue(ep, req, GFP_ATOMIC); - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_dl); if (ret) { pr_err("%s: usb epIn failed\n", __func__); list_add(&req->list, &d->tx_idle); @@ -191,7 +227,18 @@ static void gbam_write_data_tohost(struct gbam_port *port) } d->to_host++; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); +} + +static void gbam_write_data_tohost_w(struct work_struct *w) +{ + struct bam_ch_info *d; + struct gbam_port *port; + + d = container_of(w, struct bam_ch_info, write_tohost_w); + port = d->port; + + gbam_write_data_tohost(port); } void gbam_data_recv_cb(void *p, struct sk_buff *skb) @@ -206,9 +253,9 @@ void gbam_data_recv_cb(void *p, struct sk_buff *skb) pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__, port, port->port_num, d, skb->len); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } @@ -218,13 +265,13 @@ void gbam_data_recv_cb(void *p, struct sk_buff *skb) if (printk_ratelimit()) pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n", __func__, d->tohost_drp_cnt); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); dev_kfree_skb_any(skb); return; } __skb_queue_tail(&d->tx_skb_q, skb); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); gbam_write_data_tohost(port); } @@ -240,7 +287,7 @@ void gbam_data_write_done(void *p, struct sk_buff *skb) dev_kfree_skb_any(skb); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); d->pending_with_bam--; @@ -248,7 +295,7 @@ void gbam_data_write_done(void *p, struct sk_buff *skb) port, d, d->to_modem, d->pending_with_bam, port->port_num); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); queue_work(gbam_wq, &d->write_tobam_w); } @@ -265,18 +312,17 @@ static void gbam_data_write_tobam(struct work_struct *w) d = container_of(w, struct bam_ch_info, write_tobam_w); 
port = d->port; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } while (d->pending_with_bam < BAM_PENDING_LIMIT) { skb = __skb_dequeue(&d->rx_skb_q); - if (!skb) { - spin_unlock_irqrestore(&port->port_lock, flags); - return; - } + if (!skb) + break; + d->pending_with_bam++; d->to_modem++; @@ -284,9 +330,9 @@ static void gbam_data_write_tobam(struct work_struct *w) port, d, d->to_modem, d->pending_with_bam, port->port_num); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = msm_bam_dmux_write(d->id, skb); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { pr_debug("%s: write error:%d\n", __func__, ret); d->pending_with_bam--; @@ -299,7 +345,7 @@ static void gbam_data_write_tobam(struct work_struct *w) qlen = d->rx_skb_q.qlen; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD) gbam_start_rx(port); @@ -331,12 +377,12 @@ static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req) if (!port) return; - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_dl); d = &port->data_ch; list_add_tail(&req->list, &d->tx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_dl); - gbam_write_data_tohost(port); + queue_work(gbam_wq, &d->write_tohost_w); } static void @@ -369,7 +415,7 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) break; } - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); if (queue) { __skb_queue_tail(&d->rx_skb_q, skb); queue_work(gbam_wq, &d->write_tobam_w); @@ -382,16 +428,16 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) { list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); return; } - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC); if (!skb) { - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); return; } skb_reserve(skb, BAM_MUX_HDR); @@ -408,12 +454,26 @@ gbam_epout_complete(struct usb_ep *ep, struct usb_request *req) pr_err("%s: data rx enqueue err %d\n", __func__, status); - spin_lock(&port->port_lock); + spin_lock(&port->port_lock_ul); list_add_tail(&req->list, &d->rx_idle); - spin_unlock(&port->port_lock); + spin_unlock(&port->port_lock_ul); } } +static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + int status = req->status; + + pr_debug("%s status: %d\n", __func__, status); +} + +static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + int status = req->status; + + pr_debug("%s status: %d\n", __func__, status); +} + static void gbam_start_rx(struct gbam_port *port) { struct usb_request *req; @@ -423,9 +483,9 @@ static void gbam_start_rx(struct gbam_port *port) int ret; struct sk_buff *skb; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } @@ -450,9 +510,9 @@ 
static void gbam_start_rx(struct gbam_port *port) req->length = bam_mux_rx_req_size; req->context = skb; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); ret = usb_ep_queue(ep, req, GFP_ATOMIC); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (ret) { dev_kfree_skb_any(skb); @@ -466,7 +526,27 @@ static void gbam_start_rx(struct gbam_port *port) break; } } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + +static void gbam_start_endless_rx(struct gbam_port *port) +{ + struct bam_ch_info *d = &port->data_ch; + int status; + + status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC); + if (status) + pr_err("%s: error enqueuing transfer, %d\n", __func__, status); +} + +static void gbam_start_endless_tx(struct gbam_port *port) +{ + struct bam_ch_info *d = &port->data_ch; + int status; + + status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC); + if (status) + pr_err("%s: error enqueuing transfer, %d\n", __func__, status); } static void gbam_start_io(struct gbam_port *port) @@ -478,9 +558,9 @@ static void gbam_start_io(struct gbam_port *port) pr_debug("%s: port:%p\n", __func__, port); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } @@ -493,6 +573,8 @@ static void gbam_start_io(struct gbam_port *port) return; } + spin_unlock_irqrestore(&port->port_lock_ul, flags); + spin_lock_irqsave(&port->port_lock_dl, flags); ep = port->port_usb->in; ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size, gbam_epin_complete, GFP_ATOMIC); @@ -502,7 +584,7 @@ static void gbam_start_io(struct gbam_port *port) return; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock_irqrestore(&port->port_lock_dl, flags); /* queue out requests */ gbam_start_rx(port); @@ -520,6 +602,34 @@ static void gbam_notify(void *p, int event, unsigned long data) } } +static void gbam_free_buffers(struct gbam_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + struct bam_ch_info *d; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + + if (!port || !port->port_usb) + goto free_buf_out; + + d = &port->data_ch; + + gbam_free_requests(port->port_usb->in, &d->tx_idle); + gbam_free_requests(port->port_usb->out, &d->rx_idle); + + while ((skb = __skb_dequeue(&d->tx_skb_q))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&d->rx_skb_q))) + dev_kfree_skb_any(skb); + +free_buf_out: + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); +} + static void gbam_disconnect_work(struct work_struct *w) { struct gbam_port *port = @@ -533,6 +643,24 @@ static void gbam_disconnect_work(struct work_struct *w) clear_bit(BAM_CH_OPENED, &d->flags); } +static void gbam2bam_disconnect_work(struct work_struct *w) +{ + struct gbam_port *port = + container_of(w, struct gbam_port, disconnect_w); + unsigned long flags; + + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + port->port_usb = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + /* disable endpoints */ + usb_ep_disable(port->gr->out); + usb_ep_disable(port->gr->in); + +} + static void gbam_connect_work(struct work_struct *w) { struct gbam_port *port = 
container_of(w, struct gbam_port, connect_w); @@ -540,12 +668,15 @@ static void gbam_connect_work(struct work_struct *w) int ret; unsigned long flags; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (!port->port_usb) { - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); return; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (!test_bit(BAM_CH_READY, &d->flags)) return; @@ -563,30 +694,70 @@ static void gbam_connect_work(struct work_struct *w) pr_debug("%s: done\n", __func__); } -static void gbam_free_buffers(struct gbam_port *port) +static void gbam2bam_connect_work(struct work_struct *w) { - struct sk_buff *skb; - unsigned long flags; - struct bam_ch_info *d; + struct gbam_port *port = container_of(w, struct gbam_port, connect_w); + struct bam_ch_info *d = &port->data_ch; + u32 sps_params; + int ret; + unsigned long flags; - spin_lock_irqsave(&port->port_lock, flags); + ret = usb_ep_enable(port->gr->in, port->gr->in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, port->gr->in); + return; + } + port->gr->in->driver_data = port; - if (!port || !port->port_usb) - goto free_buf_out; + ret = usb_ep_enable(port->gr->out, port->gr->out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, port->gr->out); + port->gr->in->driver_data = 0; + return; + } + port->gr->out->driver_data = port; + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); + port->port_usb = port->gr; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + + ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx, + &d->dst_pipe_idx); + if (ret) { + pr_err("%s: usb_bam_connect failed: err:%d\n", + __func__, ret); + return; + } - d = &port->data_ch; + d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL); + if (!d->rx_req) + return; - gbam_free_requests(port->port_usb->in, &d->tx_idle); - gbam_free_requests(port->port_usb->out, &d->rx_idle); + d->rx_req->context = port; + d->rx_req->complete = gbam_endless_rx_complete; + d->rx_req->length = 0; + sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx | + MSM_VENDOR_ID) & ~SPS_PARAMS_TBE; + d->rx_req->udc_priv = sps_params; + d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL); + if (!d->tx_req) + return; - while ((skb = __skb_dequeue(&d->tx_skb_q))) - dev_kfree_skb_any(skb); + d->tx_req->context = port; + d->tx_req->complete = gbam_endless_tx_complete; + d->tx_req->length = 0; + sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx | + MSM_VENDOR_ID) & ~SPS_PARAMS_TBE; + d->tx_req->udc_priv = sps_params; - while ((skb = __skb_dequeue(&d->rx_skb_q))) - dev_kfree_skb_any(skb); + /* queue in & out requests */ + gbam_start_endless_rx(port); + gbam_start_endless_tx(port); -free_buf_out: - spin_unlock_irqrestore(&port->port_lock, flags); + pr_debug("%s: done\n", __func__); } /* BAM data channel ready, allow attempt to open */ @@ -608,10 +779,12 @@ static int gbam_data_ch_probe(struct platform_device *pdev) set_bit(BAM_CH_READY, &d->flags); /* if usb is online, try opening bam_ch */ - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (port->port_usb) queue_work(gbam_wq, &port->connect_w); - 
spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); break; } @@ -638,12 +811,14 @@ static int gbam_data_ch_remove(struct platform_device *pdev) port = bam_ports[i].port; d = &port->data_ch; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); if (port->port_usb) { ep_in = port->port_usb->in; ep_out = port->port_usb->out; } - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); if (ep_in) usb_ep_fifo_flush(ep_in); @@ -654,6 +829,9 @@ static int gbam_data_ch_remove(struct platform_device *pdev) msm_bam_dmux_close(d->id); + /* bam dmux will free all pending skbs */ + d->pending_with_bam = 0; + clear_bit(BAM_CH_READY, &d->flags); clear_bit(BAM_CH_OPENED, &d->flags); } @@ -673,6 +851,13 @@ static void gbam_port_free(int portno) } } +static void gbam2bam_port_free(int portno) +{ + struct gbam_port *port = bam2bam_ports[portno]; + + kfree(port); +} + static int gbam_port_alloc(int portno) { struct gbam_port *port; @@ -686,7 +871,8 @@ static int gbam_port_alloc(int portno) port->port_num = portno; /* port initialization */ - spin_lock_init(&port->port_lock); + spin_lock_init(&port->port_lock_ul); + spin_lock_init(&port->port_lock_dl); INIT_WORK(&port->connect_w, gbam_connect_work); INIT_WORK(&port->disconnect_w, gbam_disconnect_work); @@ -696,6 +882,7 @@ static int gbam_port_alloc(int portno) INIT_LIST_HEAD(&d->tx_idle); INIT_LIST_HEAD(&d->rx_idle); INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam); + INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost_w); skb_queue_head_init(&d->tx_skb_q); skb_queue_head_init(&d->rx_skb_q); d->id = bam_ch_ids[portno]; @@ -709,6 +896,33 @@ static int gbam_port_alloc(int portno) pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); + pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); + + return 0; +} + +static int gbam2bam_port_alloc(int portno) +{ + struct gbam_port *port; + struct bam_ch_info *d; + + port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + port->port_num = portno; + + /* port initialization */ + spin_lock_init(&port->port_lock_ul); + spin_lock_init(&port->port_lock_dl); + + INIT_WORK(&port->connect_w, gbam2bam_connect_work); + INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work); + + /* data ch */ + d = &port->data_ch; + d->port = port; + bam2bam_ports[portno] = port; pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); @@ -736,7 +950,8 @@ static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, port = bam_ports[i].port; if (!port) continue; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); d = &port->data_ch; @@ -759,7 +974,8 @@ static ssize_t gbam_read_stats(struct file *file, char __user *ubuf, test_bit(BAM_CH_OPENED, &d->flags), test_bit(BAM_CH_READY, &d->flags)); - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); @@ -782,7 +998,8 @@ static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, if (!port) continue; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); d = &port->data_ch; @@ -792,7 +1009,8 
@@ static ssize_t gbam_reset_stats(struct file *file, const char __user *buf, d->tohost_drp_cnt = 0; d->tomodem_drp_cnt = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); } return count; } @@ -820,7 +1038,7 @@ static void gbam_debugfs_init(void) static void gam_debugfs_init(void) { } #endif -void gbam_disconnect(struct grmnet *gr, u8 port_num) +void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans) { struct gbam_port *port; unsigned long flags; @@ -828,8 +1046,17 @@ void gbam_disconnect(struct grmnet *gr, u8 port_num) pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); - if (port_num >= n_bam_ports) { - pr_err("%s: invalid portno#%d\n", __func__, port_num); + if (trans == USB_GADGET_XPORT_BAM && + port_num >= n_bam_ports) { + pr_err("%s: invalid bam portno#%d\n", + __func__, port_num); + return; + } + + if (trans == USB_GADGET_XPORT_BAM2BAM && + port_num >= n_bam2bam_ports) { + pr_err("%s: invalid bam2bam portno#%d\n", + __func__, port_num); return; } @@ -837,24 +1064,34 @@ void gbam_disconnect(struct grmnet *gr, u8 port_num) pr_err("%s: grmnet port is null\n", __func__); return; } + if (trans == USB_GADGET_XPORT_BAM) + port = bam_ports[port_num].port; + else + port = bam2bam_ports[port_num]; - port = bam_ports[port_num].port; d = &port->data_ch; + port->gr = gr; - gbam_free_buffers(port); + if (trans == USB_GADGET_XPORT_BAM) { + gbam_free_buffers(port); - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); port->port_usb = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + n_tx_req_queued = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); - /* disable endpoints */ - usb_ep_disable(gr->out); - usb_ep_disable(gr->in); + /* disable endpoints */ + usb_ep_disable(gr->out); + usb_ep_disable(gr->in); + } queue_work(gbam_wq, &port->disconnect_w); } -int gbam_connect(struct grmnet *gr, u8 port_num) +int gbam_connect(struct grmnet *gr, u8 port_num, + enum transport_type trans, u8 connection_idx) { struct gbam_port *port; struct bam_ch_info *d; @@ -863,7 +1100,12 @@ int gbam_connect(struct grmnet *gr, u8 port_num) pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num); - if (port_num >= n_bam_ports) { + if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return -ENODEV; + } + + if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) { pr_err("%s: invalid portno#%d\n", __func__, port_num); return -ENODEV; } @@ -873,52 +1115,66 @@ int gbam_connect(struct grmnet *gr, u8 port_num) return -ENODEV; } - port = bam_ports[port_num].port; + if (trans == USB_GADGET_XPORT_BAM) + port = bam_ports[port_num].port; + else + port = bam2bam_ports[port_num]; + d = &port->data_ch; - ret = usb_ep_enable(gr->in, gr->in_desc); - if (ret) { - pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", - __func__, gr->in); - return ret; - } - gr->in->driver_data = port; + if (trans == USB_GADGET_XPORT_BAM) { + ret = usb_ep_enable(gr->in, gr->in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, gr->in); + return ret; + } + gr->in->driver_data = port; - ret = usb_ep_enable(gr->out, gr->out_desc); - if (ret) { - pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", - __func__, gr->out); - gr->in->driver_data = 0; - return ret; - } - gr->out->driver_data = port; + ret 
= usb_ep_enable(gr->out, gr->out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, gr->out); + gr->in->driver_data = 0; + return ret; + } + gr->out->driver_data = port; - spin_lock_irqsave(&port->port_lock, flags); + spin_lock_irqsave(&port->port_lock_ul, flags); + spin_lock(&port->port_lock_dl); port->port_usb = gr; - d->to_host = 0; - d->to_modem = 0; - d->pending_with_bam = 0; - d->tohost_drp_cnt = 0; - d->tomodem_drp_cnt = 0; - spin_unlock_irqrestore(&port->port_lock, flags); + d->to_host = 0; + d->to_modem = 0; + d->pending_with_bam = 0; + d->tohost_drp_cnt = 0; + d->tomodem_drp_cnt = 0; + spin_unlock(&port->port_lock_dl); + spin_unlock_irqrestore(&port->port_lock_ul, flags); + } + if (trans == USB_GADGET_XPORT_BAM2BAM) { + port->gr = gr; + d->connection_idx = connection_idx; + } queue_work(gbam_wq, &port->connect_w); return 0; } -int gbam_setup(unsigned int count) +int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port) { int i; int ret; - pr_debug("%s: requested ports:%d\n", __func__, count); + pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n", + __func__, no_bam_port, no_bam2bam_port); - if (!count || count > BAM_N_PORTS) { - pr_err("%s: Invalid num of ports count:%d\n", - __func__, count); + if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS + || no_bam2bam_port > BAM2BAM_N_PORTS) { + pr_err("%s: Invalid num of ports count:%d,%d\n", + __func__, no_bam_port, no_bam2bam_port); return -EINVAL; } @@ -929,7 +1185,7 @@ int gbam_setup(unsigned int count) return -ENOMEM; } - for (i = 0; i < count; i++) { + for (i = 0; i < no_bam_port; i++) { n_bam_ports++; ret = gbam_port_alloc(i); if (ret) { @@ -939,13 +1195,23 @@ int gbam_setup(unsigned int count) } } + for (i = 0; i < no_bam2bam_port; i++) { + n_bam2bam_ports++; + ret = gbam2bam_port_alloc(i); + if (ret) { + n_bam2bam_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_bam_ports; + } + } gbam_debugfs_init(); - return 0; + free_bam_ports: for (i = 0; i < n_bam_ports; i++) gbam_port_free(i); - + for (i = 0; i < n_bam2bam_ports; i++) + gbam2bam_port_free(i); destroy_workqueue(gbam_wq); return ret; diff --git a/drivers/usb/gadget/u_ctrl_hsic.c b/drivers/usb/gadget/u_ctrl_hsic.c new file mode 100644 index 00000000000..fdfab968a69 --- /dev/null +++ b/drivers/usb/gadget/u_ctrl_hsic.c @@ -0,0 +1,617 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* from cdc-acm.h */ +#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ +#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ +#define ACM_CTRL_OVERRUN (1 << 6) +#define ACM_CTRL_PARITY (1 << 5) +#define ACM_CTRL_FRAMING (1 << 4) +#define ACM_CTRL_RI (1 << 3) +#define ACM_CTRL_BRK (1 << 2) +#define ACM_CTRL_DSR (1 << 1) +#define ACM_CTRL_DCD (1 << 0) + + +static unsigned int no_ctrl_ports; + +static const char *ctrl_bridge_names[] = { + "dun_ctrl_hsic0", + "rmnet_ctrl_hsic0" +}; + +#define CTRL_BRIDGE_NAME_MAX_LEN 20 +#define READ_BUF_LEN 1024 + +#define CH_OPENED 0 +#define CH_READY 1 + +struct gctrl_port { + /* port */ + unsigned port_num; + + /* gadget */ + spinlock_t port_lock; + void *port_usb; + + /* work queue*/ + struct workqueue_struct *wq; + struct work_struct connect_w; + struct work_struct disconnect_w; + + enum gadget_type gtype; + + /*ctrl pkt response cb*/ + int (*send_cpkt_response)(void *g, void *buf, size_t len); + + struct bridge brdg; + + /* bridge status */ + unsigned long bridge_sts; + + /* control bits */ + unsigned cbits_tomodem; + unsigned cbits_tohost; + + /* counters */ + unsigned long to_modem; + unsigned long to_host; + unsigned long drp_cpkt_cnt; +}; + +static struct { + struct gctrl_port *port; + struct platform_driver pdrv; +} gctrl_ports[NUM_PORTS]; + +static int ghsic_ctrl_receive(void *dev, void *buf, size_t actual) +{ + struct gctrl_port *port = dev; + int retval = 0; + + pr_debug_ratelimited("%s: read complete bytes read: %d\n", + __func__, actual); + + /* send it to USB here */ + if (port && port->send_cpkt_response) { + retval = port->send_cpkt_response(port->port_usb, buf, actual); + port->to_host++; + } + + return retval; +} + +static int +ghsic_send_cpkt_tomodem(u8 portno, void *buf, size_t len) +{ + void *cbuf; + struct gctrl_port *port; + + if (portno >= no_ctrl_ports) { + pr_err("%s: Invalid portno#%d\n", __func__, portno); + return -ENODEV; + } + + port = gctrl_ports[portno].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + cbuf = kmalloc(len, GFP_ATOMIC); + if (!cbuf) + return -ENOMEM; + + memcpy(cbuf, buf, len); + + /* drop cpkt if ch is not open */ + if (!test_bit(CH_OPENED, &port->bridge_sts)) { + port->drp_cpkt_cnt++; + kfree(cbuf); + return 0; + } + + pr_debug("%s: ctrl_pkt:%d bytes\n", __func__, len); + + ctrl_bridge_write(port->brdg.ch_id, cbuf, len); + + port->to_modem++; + + return 0; +} + +static void +ghsic_send_cbits_tomodem(void *gptr, u8 portno, int cbits) +{ + struct gctrl_port *port; + + if (portno >= no_ctrl_ports || !gptr) { + pr_err("%s: Invalid portno#%d\n", __func__, portno); + return; + } + + port = gctrl_ports[portno].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return; + } + + if (cbits == port->cbits_tomodem) + return; + + port->cbits_tomodem = cbits; + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + pr_debug("%s: ctrl_tomodem:%d\n", __func__, cbits); + + ctrl_bridge_set_cbits(port->brdg.ch_id, cbits); +} + +static void ghsic_ctrl_connect_w(struct work_struct *w) +{ + struct gserial *gser = NULL; + struct grmnet *gr = NULL; + struct gctrl_port *port = + container_of(w, struct gctrl_port, connect_w); + unsigned long flags; + int retval; + unsigned cbits; + + if (!port || !test_bit(CH_READY, &port->bridge_sts)) + return; + + pr_debug("%s: port:%p\n", __func__, port); + + retval = ctrl_bridge_open(&port->brdg); + 
if (retval) { + pr_err("%s: ctrl bridge open failed :%d\n", __func__, retval); + return; + } + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + ctrl_bridge_close(port->brdg.ch_id); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + set_bit(CH_OPENED, &port->bridge_sts); + spin_unlock_irqrestore(&port->port_lock, flags); + + cbits = ctrl_bridge_get_cbits_tohost(port->brdg.ch_id); + + if (port->gtype == USB_GADGET_SERIAL && (cbits & ACM_CTRL_DCD)) { + gser = port->port_usb; + if (gser && gser->connect) + gser->connect(gser); + return; + } + + if (port->gtype == USB_GADGET_RMNET) { + gr = port->port_usb; + if (gr && gr->connect) + gr->connect(gr); + } +} + +int ghsic_ctrl_connect(void *gptr, int port_num) +{ + struct gctrl_port *port; + struct gserial *gser; + struct grmnet *gr; + unsigned long flags; + + pr_debug("%s: port#%d\n", __func__, port_num); + + if (port_num > no_ctrl_ports || !gptr) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return -ENODEV; + } + + port = gctrl_ports[port_num].port; + if (!port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + spin_lock_irqsave(&port->port_lock, flags); + if (port->gtype == USB_GADGET_SERIAL) { + gser = gptr; + gser->notify_modem = ghsic_send_cbits_tomodem; + } + + if (port->gtype == USB_GADGET_RMNET) { + gr = gptr; + port->send_cpkt_response = gr->send_cpkt_response; + gr->send_encap_cmd = ghsic_send_cpkt_tomodem; + gr->notify_modem = ghsic_send_cbits_tomodem; + } + + port->port_usb = gptr; + port->to_host = 0; + port->to_modem = 0; + port->drp_cpkt_cnt = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->connect_w); + + return 0; +} + +static void gctrl_disconnect_w(struct work_struct *w) +{ + struct gctrl_port *port = + container_of(w, struct gctrl_port, disconnect_w); + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + /* send the dtr zero */ + ctrl_bridge_close(port->brdg.ch_id); + clear_bit(CH_OPENED, &port->bridge_sts); +} + +void ghsic_ctrl_disconnect(void *gptr, int port_num) +{ + struct gctrl_port *port; + struct gserial *gser = NULL; + struct grmnet *gr = NULL; + unsigned long flags; + + pr_debug("%s: port#%d\n", __func__, port_num); + + port = gctrl_ports[port_num].port; + + if (port_num > no_ctrl_ports) { + pr_err("%s: invalid portno#%d\n", __func__, port_num); + return; + } + + if (!gptr || !port) { + pr_err("%s: grmnet port is null\n", __func__); + return; + } + + if (port->gtype == USB_GADGET_SERIAL) + gser = gptr; + else + gr = gptr; + + spin_lock_irqsave(&port->port_lock, flags); + if (gr) { + gr->send_encap_cmd = 0; + gr->notify_modem = 0; + } + + if (gser) + gser->notify_modem = 0; + port->cbits_tomodem = 0; + port->port_usb = 0; + port->send_cpkt_response = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->disconnect_w); +} + +static void ghsic_ctrl_status(void *ctxt, unsigned int ctrl_bits) +{ + struct gctrl_port *port = ctxt; + struct gserial *gser; + + pr_debug("%s - input control lines: dcd%c dsr%c break%c " + "ring%c framing%c parity%c overrun%c\n", __func__, + ctrl_bits & ACM_CTRL_DCD ? '+' : '-', + ctrl_bits & ACM_CTRL_DSR ? '+' : '-', + ctrl_bits & ACM_CTRL_BRK ? '+' : '-', + ctrl_bits & ACM_CTRL_RI ? '+' : '-', + ctrl_bits & ACM_CTRL_FRAMING ? '+' : '-', + ctrl_bits & ACM_CTRL_PARITY ? '+' : '-', + ctrl_bits & ACM_CTRL_OVERRUN ? 
'+' : '-');
+
+	port->cbits_tohost = ctrl_bits;
+	gser = port->port_usb;
+	if (gser && gser->send_modem_ctrl_bits)
+		gser->send_modem_ctrl_bits(gser, ctrl_bits);
+}
+
+static int ghsic_ctrl_probe(struct platform_device *pdev)
+{
+	struct gctrl_port *port;
+	unsigned long flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	if (pdev->id >= no_ctrl_ports) {
+		pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+		return -EINVAL;
+	}
+
+	port = gctrl_ports[pdev->id].port;
+	set_bit(CH_READY, &port->bridge_sts);
+
+	/* if usb is online, start read */
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		queue_work(port->wq, &port->connect_w);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return 0;
+}
+
+static int ghsic_ctrl_remove(struct platform_device *pdev)
+{
+	struct gctrl_port *port;
+	struct gserial *gser = NULL;
+	struct grmnet *gr = NULL;
+	unsigned long flags;
+
+	pr_debug("%s: name:%s\n", __func__, pdev->name);
+
+	if (pdev->id >= no_ctrl_ports) {
+		pr_err("%s: invalid port: %d\n", __func__, pdev->id);
+		return -EINVAL;
+	}
+
+	port = gctrl_ports[pdev->id].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (!port->port_usb) {
+		spin_unlock_irqrestore(&port->port_lock, flags);
+		goto not_ready;
+	}
+
+	if (port->gtype == USB_GADGET_SERIAL)
+		gser = port->port_usb;
+	else
+		gr = port->port_usb;
+
+	port->cbits_tohost = 0;
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (gr && gr->disconnect)
+		gr->disconnect(gr);
+
+	if (gser && gser->disconnect)
+		gser->disconnect(gser);
+
+	ctrl_bridge_close(port->brdg.ch_id);
+
+	clear_bit(CH_OPENED, &port->bridge_sts);
+not_ready:
+	clear_bit(CH_READY, &port->bridge_sts);
+
+	return 0;
+}
+
+static void ghsic_ctrl_port_free(int portno)
+{
+	struct gctrl_port *port = gctrl_ports[portno].port;
+	struct platform_driver *pdrv = &gctrl_ports[portno].pdrv;
+
+	destroy_workqueue(port->wq);
+	kfree(port);
+
+	if (pdrv)
+		platform_driver_unregister(pdrv);
+}
+
+static int gctrl_port_alloc(int portno, enum gadget_type gtype)
+{
+	struct gctrl_port *port;
+	struct platform_driver *pdrv;
+
+	port = kzalloc(sizeof(struct gctrl_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->wq = create_singlethread_workqueue(ctrl_bridge_names[portno]);
+	if (!port->wq) {
+		pr_err("%s: Unable to create workqueue:%s\n",
+				__func__, ctrl_bridge_names[portno]);
+		/* don't leak the port on workqueue allocation failure */
+		kfree(port);
+		return -ENOMEM;
+	}
+
+	port->port_num = portno;
+	port->gtype = gtype;
+
+	spin_lock_init(&port->port_lock);
+
+	INIT_WORK(&port->connect_w, ghsic_ctrl_connect_w);
+	INIT_WORK(&port->disconnect_w, gctrl_disconnect_w);
+
+	port->brdg.ch_id = portno;
+	port->brdg.ctx = port;
+	port->brdg.ops.send_pkt = ghsic_ctrl_receive;
+	if (port->gtype == USB_GADGET_SERIAL)
+		port->brdg.ops.send_cbits = ghsic_ctrl_status;
+	gctrl_ports[portno].port = port;
+
+	pdrv = &gctrl_ports[portno].pdrv;
+	pdrv->probe = ghsic_ctrl_probe;
+	pdrv->remove = ghsic_ctrl_remove;
+	pdrv->driver.name = ctrl_bridge_names[portno];
+	pdrv->driver.owner = THIS_MODULE;
+
+	platform_driver_register(pdrv);
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
+
+	return 0;
+}
+
+int ghsic_ctrl_setup(unsigned int num_ports, enum gadget_type gtype)
+{
+	int first_port_id = no_ctrl_ports;
+	int total_num_ports = num_ports + no_ctrl_ports;
+	int i;
+	int ret = 0;
+
+	if (!num_ports || total_num_ports > NUM_PORTS) {
+		pr_err("%s: Invalid num of ports count:%d\n",
+				__func__, num_ports);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: requested ports:%d\n", __func__, num_ports);
+
+	for (i = 
first_port_id; i < (first_port_id + num_ports); i++) { + + /*probe can be called while port_alloc,so update no_ctrl_ports*/ + no_ctrl_ports++; + ret = gctrl_port_alloc(i, gtype); + if (ret) { + no_ctrl_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_ports; + } + } + + return first_port_id; + +free_ports: + for (i = first_port_id; i < no_ctrl_ports; i++) + ghsic_ctrl_port_free(i); + no_ctrl_ports = first_port_id; + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t gctrl_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct gctrl_port *port; + struct platform_driver *pdrv; + char *buf; + unsigned long flags; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < no_ctrl_ports; i++) { + port = gctrl_ports[i].port; + if (!port) + continue; + pdrv = &gctrl_ports[i].pdrv; + spin_lock_irqsave(&port->port_lock, flags); + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName: %s\n" + "#PORT:%d port: %p\n" + "to_usbhost: %lu\n" + "to_modem: %lu\n" + "cpkt_drp_cnt: %lu\n" + "DTR: %s\n" + "ch_open: %d\n" + "ch_ready: %d\n", + pdrv->driver.name, + i, port, + port->to_host, port->to_modem, + port->drp_cpkt_cnt, + port->cbits_tomodem ? "HIGH" : "LOW", + test_bit(CH_OPENED, &port->bridge_sts), + test_bit(CH_READY, &port->bridge_sts)); + + spin_unlock_irqrestore(&port->port_lock, flags); + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t gctrl_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct gctrl_port *port; + int i; + unsigned long flags; + + for (i = 0; i < no_ctrl_ports; i++) { + port = gctrl_ports[i].port; + if (!port) + continue; + + spin_lock_irqsave(&port->port_lock, flags); + port->to_host = 0; + port->to_modem = 0; + port->drp_cpkt_cnt = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + } + return count; +} + +const struct file_operations gctrl_stats_ops = { + .read = gctrl_read_stats, + .write = gctrl_reset_stats, +}; + +struct dentry *gctrl_dent; +struct dentry *gctrl_dfile; +static void gctrl_debugfs_init(void) +{ + gctrl_dent = debugfs_create_dir("ghsic_ctrl_xport", 0); + if (IS_ERR(gctrl_dent)) + return; + + gctrl_dfile = + debugfs_create_file("status", 0444, gctrl_dent, 0, + &gctrl_stats_ops); + if (!gctrl_dfile || IS_ERR(gctrl_dfile)) + debugfs_remove(gctrl_dent); +} + +static void gctrl_debugfs_exit(void) +{ + debugfs_remove(gctrl_dfile); + debugfs_remove(gctrl_dent); +} + +#else +static void gctrl_debugfs_init(void) { } +static void gctrl_debugfs_exit(void) { } +#endif + +static int __init gctrl_init(void) +{ + gctrl_debugfs_init(); + + return 0; +} +module_init(gctrl_init); + +static void __exit gctrl_exit(void) +{ + gctrl_debugfs_exit(); +} +module_exit(gctrl_exit); +MODULE_DESCRIPTION("hsic control xport driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/gadget/u_data_hsic.c b/drivers/usb/gadget/u_data_hsic.c new file mode 100644 index 00000000000..6f5e7b3c8eb --- /dev/null +++ b/drivers/usb/gadget/u_data_hsic.c @@ -0,0 +1,962 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned int no_data_ports; + +static const char *data_bridge_names[] = { + "dun_data_hsic0", + "rmnet_data_hsic0" +}; + +#define DATA_BRIDGE_NAME_MAX_LEN 20 + +#define GHSIC_DATA_RMNET_RX_Q_SIZE 50 +#define GHSIC_DATA_RMNET_TX_Q_SIZE 300 +#define GHSIC_DATA_SERIAL_RX_Q_SIZE 2 +#define GHSIC_DATA_SERIAL_TX_Q_SIZE 2 +#define GHSIC_DATA_RX_REQ_SIZE 2048 + +static unsigned int ghsic_data_rmnet_tx_q_size = GHSIC_DATA_RMNET_TX_Q_SIZE; +module_param(ghsic_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_rmnet_rx_q_size = GHSIC_DATA_RMNET_RX_Q_SIZE; +module_param(ghsic_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_serial_tx_q_size = GHSIC_DATA_SERIAL_TX_Q_SIZE; +module_param(ghsic_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_serial_rx_q_size = GHSIC_DATA_SERIAL_RX_Q_SIZE; +module_param(ghsic_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_rx_req_size = GHSIC_DATA_RX_REQ_SIZE; +module_param(ghsic_data_rx_req_size, uint, S_IRUGO | S_IWUSR); + +/*flow ctrl*/ +#define GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD 500 +#define GHSIC_DATA_FLOW_CTRL_DISABLE 300 +#define GHSIC_DATA_FLOW_CTRL_SUPPORT 1 +#define GHSIC_DATA_PENDLIMIT_WITH_BRIDGE 500 + +static unsigned int ghsic_data_fctrl_support = GHSIC_DATA_FLOW_CTRL_SUPPORT; +module_param(ghsic_data_fctrl_support, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_fctrl_en_thld = + GHSIC_DATA_FLOW_CTRL_EN_THRESHOLD; +module_param(ghsic_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_fctrl_dis_thld = GHSIC_DATA_FLOW_CTRL_DISABLE; +module_param(ghsic_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int ghsic_data_pend_limit_with_bridge = + GHSIC_DATA_PENDLIMIT_WITH_BRIDGE; +module_param(ghsic_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR); + +#define CH_OPENED 0 +#define CH_READY 1 + +struct gdata_port { + /* port */ + unsigned port_num; + + /* gadget */ + spinlock_t port_lock; + void *port_usb; + struct usb_ep *in; + struct usb_ep *out; + + enum gadget_type gtype; + + /* data transfer queues */ + unsigned int tx_q_size; + struct list_head tx_idle; + struct sk_buff_head tx_skb_q; + + unsigned int rx_q_size; + struct list_head rx_idle; + struct sk_buff_head rx_skb_q; + + /* work */ + struct workqueue_struct *wq; + struct work_struct connect_w; + struct work_struct disconnect_w; + struct work_struct write_tomdm_w; + struct work_struct write_tohost_w; + + struct bridge brdg; + + /*bridge status*/ + unsigned long bridge_sts; + + /*counters*/ + unsigned long to_modem; + unsigned long to_host; + unsigned int rx_throttled_cnt; + unsigned int rx_unthrottled_cnt; + unsigned int tx_throttled_cnt; + unsigned int tx_unthrottled_cnt; + unsigned int tomodem_drp_cnt; + unsigned int unthrottled_pnd_skbs; +}; + +static struct { + struct gdata_port *port; + struct platform_driver pdrv; +} gdata_ports[NUM_PORTS]; + +static void ghsic_data_start_rx(struct gdata_port *port); + +static void ghsic_data_free_requests(struct usb_ep *ep, struct list_head *head) +{ + struct usb_request *req; + + while 
(!list_empty(head)) { + req = list_entry(head->next, struct usb_request, list); + list_del(&req->list); + usb_ep_free_request(ep, req); + } +} + +static int ghsic_data_alloc_requests(struct usb_ep *ep, struct list_head *head, + int num, + void (*cb)(struct usb_ep *ep, struct usb_request *), + gfp_t flags) +{ + int i; + struct usb_request *req; + + pr_debug("%s: ep:%s head:%p num:%d cb:%p", __func__, + ep->name, head, num, cb); + + for (i = 0; i < num; i++) { + req = usb_ep_alloc_request(ep, flags); + if (!req) { + pr_debug("%s: req allocated:%d\n", __func__, i); + return list_empty(head) ? -ENOMEM : 0; + } + req->complete = cb; + list_add(&req->list, head); + } + + return 0; +} + +static void ghsic_data_unthrottle_tx(void *ctx) +{ + struct gdata_port *port = ctx; + unsigned long flags; + + if (!port) + return; + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) { + port->tx_unthrottled_cnt++; + queue_work(port->wq, &port->write_tomdm_w); + pr_debug("%s: port num =%d unthrottled\n", __func__, + port->port_num); + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static void ghsic_data_write_tohost(struct work_struct *w) +{ + unsigned long flags; + struct sk_buff *skb; + int ret; + struct usb_request *req; + struct usb_ep *ep; + struct gdata_port *port; + + port = container_of(w, struct gdata_port, write_tohost_w); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->in; + + while (!list_empty(&port->tx_idle)) { + skb = __skb_dequeue(&port->tx_skb_q); + if (!skb) + break; + + req = list_first_entry(&port->tx_idle, struct usb_request, + list); + req->context = skb; + req->buf = skb->data; + req->length = skb->len; + + list_del(&req->list); + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_ep_queue(ep, req, GFP_KERNEL); + spin_lock_irqsave(&port->port_lock, flags); + if (ret) { + pr_err("%s: usb epIn failed\n", __func__); + list_add(&req->list, &port->tx_idle); + dev_kfree_skb_any(skb); + break; + } + port->to_host++; + if (ghsic_data_fctrl_support && + port->tx_skb_q.qlen <= ghsic_data_fctrl_dis_thld && + test_and_clear_bit(RX_THROTTLED, &port->brdg.flags)) { + port->rx_unthrottled_cnt++; + port->unthrottled_pnd_skbs = port->tx_skb_q.qlen; + pr_debug_ratelimited("%s: disable flow ctrl:" + " tx skbq len: %u\n", + __func__, port->tx_skb_q.qlen); + data_bridge_unthrottle_rx(port->brdg.ch_id); + } + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int ghsic_data_receive(void *p, void *data, size_t len) +{ + struct gdata_port *port = p; + unsigned long flags; + struct sk_buff *skb = data; + + if (!port) { + dev_kfree_skb_any(skb); + return -EINVAL; + } + + pr_debug("%s: p:%p#%d skb_len:%d\n", __func__, + port, port->port_num, skb->len); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + dev_kfree_skb_any(skb); + return -ENOTCONN; + } + + __skb_queue_tail(&port->tx_skb_q, skb); + + if (ghsic_data_fctrl_support && + port->tx_skb_q.qlen >= ghsic_data_fctrl_en_thld) { + set_bit(RX_THROTTLED, &port->brdg.flags); + port->rx_throttled_cnt++; + pr_debug_ratelimited("%s: flow ctrl enabled: tx skbq len: %u\n", + __func__, port->tx_skb_q.qlen); + spin_unlock_irqrestore(&port->port_lock, flags); + queue_work(port->wq, &port->write_tohost_w); + return -EBUSY; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->write_tohost_w); + + return 0; +} 
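+
+/*
+ * Data-path flow control, as implemented by the handlers above and below
+ * (the throttling checks are gated on ghsic_data_fctrl_support):
+ *
+ * bridge -> host: ghsic_data_receive() queues incoming skbs on tx_skb_q and
+ * sets RX_THROTTLED once the queue reaches ghsic_data_fctrl_en_thld;
+ * ghsic_data_write_tohost() drains the queue onto the IN endpoint and calls
+ * data_bridge_unthrottle_rx() when it drops back to ghsic_data_fctrl_dis_thld.
+ *
+ * host -> modem: ghsic_data_write_tomdm() below drains rx_skb_q through
+ * data_bridge_write(); an -EBUSY return is counted as throttling and the
+ * drain resumes once the bridge calls ghsic_data_unthrottle_tx().
+ */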
+ +static void ghsic_data_write_tomdm(struct work_struct *w) +{ + struct gdata_port *port; + struct sk_buff *skb; + unsigned long flags; + int ret; + + port = container_of(w, struct gdata_port, write_tomdm_w); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + if (test_bit(TX_THROTTLED, &port->brdg.flags)) { + spin_unlock_irqrestore(&port->port_lock, flags); + goto start_rx; + } + + while ((skb = __skb_dequeue(&port->rx_skb_q))) { + pr_debug("%s: port:%p tom:%lu pno:%d\n", __func__, + port, port->to_modem, port->port_num); + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = data_bridge_write(port->brdg.ch_id, skb); + spin_lock_irqsave(&port->port_lock, flags); + if (ret < 0) { + if (ret == -EBUSY) { + /*flow control*/ + port->tx_throttled_cnt++; + break; + } + pr_err_ratelimited("%s: write error:%d\n", + __func__, ret); + port->tomodem_drp_cnt++; + dev_kfree_skb_any(skb); + break; + } + port->to_modem++; + } + spin_unlock_irqrestore(&port->port_lock, flags); +start_rx: + ghsic_data_start_rx(port); +} + +static void ghsic_data_epin_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gdata_port *port = ep->driver_data; + struct sk_buff *skb = req->context; + int status = req->status; + + switch (status) { + case 0: + /* successful completion */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + dev_kfree_skb_any(skb); + req->buf = 0; + usb_ep_free_request(ep, req); + return; + default: + pr_err("%s: data tx ep error %d\n", __func__, status); + break; + } + + dev_kfree_skb_any(skb); + + spin_lock(&port->port_lock); + list_add_tail(&req->list, &port->tx_idle); + spin_unlock(&port->port_lock); + + queue_work(port->wq, &port->write_tohost_w); +} + +static void +ghsic_data_epout_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gdata_port *port = ep->driver_data; + struct sk_buff *skb = req->context; + int status = req->status; + int queue = 0; + + switch (status) { + case 0: + skb_put(skb, req->actual); + queue = 1; + break; + case -ECONNRESET: + case -ESHUTDOWN: + /* cable disconnection */ + dev_kfree_skb_any(skb); + req->buf = 0; + usb_ep_free_request(ep, req); + return; + default: + pr_err_ratelimited("%s: %s response error %d, %d/%d\n", + __func__, ep->name, status, + req->actual, req->length); + dev_kfree_skb_any(skb); + break; + } + + spin_lock(&port->port_lock); + if (queue) { + __skb_queue_tail(&port->rx_skb_q, skb); + list_add_tail(&req->list, &port->rx_idle); + queue_work(port->wq, &port->write_tomdm_w); + } + spin_unlock(&port->port_lock); +} + +static void ghsic_data_start_rx(struct gdata_port *port) +{ + struct usb_request *req; + struct usb_ep *ep; + unsigned long flags; + int ret; + struct sk_buff *skb; + + pr_debug("%s: port:%p\n", __func__, port); + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->out; + + while (port->port_usb && !list_empty(&port->rx_idle)) { + if (port->rx_skb_q.qlen > ghsic_data_pend_limit_with_bridge) + break; + + req = list_first_entry(&port->rx_idle, + struct usb_request, list); + + skb = alloc_skb(ghsic_data_rx_req_size, GFP_ATOMIC); + if (!skb) + break; + + list_del(&req->list); + req->buf = skb->data; + req->length = ghsic_data_rx_req_size; + req->context = skb; + + spin_unlock_irqrestore(&port->port_lock, flags); + ret = usb_ep_queue(ep, req, GFP_KERNEL); + spin_lock_irqsave(&port->port_lock, flags); + if 
(ret) { + dev_kfree_skb_any(skb); + + pr_err_ratelimited("%s: rx queue failed\n", __func__); + + if (port->port_usb) + list_add(&req->list, &port->rx_idle); + else + usb_ep_free_request(ep, req); + break; + } + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static void ghsic_data_start_io(struct gdata_port *port) +{ + unsigned long flags; + struct usb_ep *ep; + int ret; + + pr_debug("%s: port:%p\n", __func__, port); + + spin_lock_irqsave(&port->port_lock, flags); + if (!port->port_usb) { + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->out; + ret = ghsic_data_alloc_requests(ep, &port->rx_idle, + port->rx_q_size, ghsic_data_epout_complete, GFP_ATOMIC); + if (ret) { + pr_err("%s: rx req allocation failed\n", __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + ep = port->in; + ret = ghsic_data_alloc_requests(ep, &port->tx_idle, + port->tx_q_size, ghsic_data_epin_complete, GFP_ATOMIC); + if (ret) { + pr_err("%s: tx req allocation failed\n", __func__); + ghsic_data_free_requests(ep, &port->rx_idle); + spin_unlock_irqrestore(&port->port_lock, flags); + return; + } + + spin_unlock_irqrestore(&port->port_lock, flags); + + /* queue out requests */ + ghsic_data_start_rx(port); +} + +static void ghsic_data_connect_w(struct work_struct *w) +{ + struct gdata_port *port = + container_of(w, struct gdata_port, connect_w); + int ret; + + if (!port || !test_bit(CH_READY, &port->bridge_sts)) + return; + + pr_debug("%s: port:%p\n", __func__, port); + + ret = data_bridge_open(&port->brdg); + if (ret) { + pr_err("%s: unable open bridge ch:%d err:%d\n", + __func__, port->brdg.ch_id, ret); + return; + } + + set_bit(CH_OPENED, &port->bridge_sts); + + ghsic_data_start_io(port); +} + +static void ghsic_data_disconnect_w(struct work_struct *w) +{ + struct gdata_port *port = + container_of(w, struct gdata_port, disconnect_w); + + if (!test_bit(CH_OPENED, &port->bridge_sts)) + return; + + data_bridge_close(port->brdg.ch_id); + clear_bit(CH_OPENED, &port->bridge_sts); +} + +static void ghsic_data_free_buffers(struct gdata_port *port) +{ + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock, flags); + + if (!port || !port->port_usb) + goto free_buf_out; + + ghsic_data_free_requests(port->in, &port->tx_idle); + ghsic_data_free_requests(port->out, &port->rx_idle); + + while ((skb = __skb_dequeue(&port->tx_skb_q))) + dev_kfree_skb_any(skb); + + while ((skb = __skb_dequeue(&port->rx_skb_q))) + dev_kfree_skb_any(skb); + +free_buf_out: + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int ghsic_data_probe(struct platform_device *pdev) +{ + struct gdata_port *port; + unsigned long flags; + + pr_debug("%s: name:%s no_data_ports= %d\n", + __func__, pdev->name, no_data_ports); + + if (pdev->id >= no_data_ports) { + pr_err("%s: invalid port: %d\n", __func__, pdev->id); + return -EINVAL; + } + + port = gdata_ports[pdev->id].port; + set_bit(CH_READY, &port->bridge_sts); + + spin_lock_irqsave(&port->port_lock, flags); + /* if usb is online, try opening bridge */ + if (port->port_usb) + queue_work(port->wq, &port->connect_w); + spin_unlock_irqrestore(&port->port_lock, flags); + + return 0; +} + +/* mdm disconnect */ +static int ghsic_data_remove(struct platform_device *pdev) +{ + struct gdata_port *port; + struct usb_ep *ep_in = NULL; + struct usb_ep *ep_out = NULL; + unsigned long flags; + + pr_debug("%s: name:%s\n", __func__, pdev->name); + + if (pdev->id >= no_data_ports) { + pr_err("%s: invalid port: %d\n", __func__, 
pdev->id);
+		return -EINVAL;
+	}
+
+	port = gdata_ports[pdev->id].port;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		ep_in = port->in;
+		ep_out = port->out;
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	if (ep_in)
+		usb_ep_fifo_flush(ep_in);
+	if (ep_out)
+		usb_ep_fifo_flush(ep_out);
+
+	ghsic_data_free_buffers(port);
+
+	data_bridge_close(port->brdg.ch_id);
+
+	clear_bit(CH_READY, &port->bridge_sts);
+	clear_bit(CH_OPENED, &port->bridge_sts);
+
+	return 0;
+}
+
+static void ghsic_data_port_free(int portno)
+{
+	struct gdata_port *port = gdata_ports[portno].port;
+	struct platform_driver *pdrv = &gdata_ports[portno].pdrv;
+
+	destroy_workqueue(port->wq);
+	kfree(port);
+
+	if (pdrv)
+		platform_driver_unregister(pdrv);
+}
+
+static int ghsic_data_port_alloc(unsigned port_num, enum gadget_type gtype)
+{
+	struct gdata_port *port;
+	struct platform_driver *pdrv;
+
+	port = kzalloc(sizeof(struct gdata_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->wq = create_singlethread_workqueue(data_bridge_names[port_num]);
+	if (!port->wq) {
+		pr_err("%s: Unable to create workqueue:%s\n",
+				__func__, data_bridge_names[port_num]);
+		kfree(port);
+		return -ENOMEM;
+	}
+	port->port_num = port_num;
+
+	/* port initialization */
+	spin_lock_init(&port->port_lock);
+
+	INIT_WORK(&port->connect_w, ghsic_data_connect_w);
+	INIT_WORK(&port->disconnect_w, ghsic_data_disconnect_w);
+	INIT_WORK(&port->write_tohost_w, ghsic_data_write_tohost);
+	INIT_WORK(&port->write_tomdm_w, ghsic_data_write_tomdm);
+
+	INIT_LIST_HEAD(&port->tx_idle);
+	INIT_LIST_HEAD(&port->rx_idle);
+
+	skb_queue_head_init(&port->tx_skb_q);
+	skb_queue_head_init(&port->rx_skb_q);
+
+	port->gtype = gtype;
+	port->brdg.ch_id = port_num;
+	port->brdg.ctx = port;
+	port->brdg.ops.send_pkt = ghsic_data_receive;
+	port->brdg.ops.unthrottle_tx = ghsic_data_unthrottle_tx;
+	gdata_ports[port_num].port = port;
+
+	pdrv = &gdata_ports[port_num].pdrv;
+	pdrv->probe = ghsic_data_probe;
+	pdrv->remove = ghsic_data_remove;
+	pdrv->driver.name = data_bridge_names[port_num];
+	pdrv->driver.owner = THIS_MODULE;
+
+	platform_driver_register(pdrv);
+
+	pr_debug("%s: port:%p portno:%d\n", __func__, port, port_num);
+
+	return 0;
+}
+
+void ghsic_data_disconnect(void *gptr, int port_num)
+{
+	struct gdata_port *port;
+	unsigned long flags;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= no_data_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return;
+	}
+
+	port = gdata_ports[port_num].port;
+
+	if (!gptr || !port) {
+		pr_err("%s: port is null\n", __func__);
+		return;
+	}
+
+	ghsic_data_free_buffers(port);
+
+	/* disable endpoints */
+	if (port->in)
+		usb_ep_disable(port->in);
+
+	if (port->out)
+		usb_ep_disable(port->out);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	port->port_usb = 0;
+	port->in = NULL;
+	port->out = NULL;
+	clear_bit(TX_THROTTLED, &port->brdg.flags);
+	clear_bit(RX_THROTTLED, &port->brdg.flags);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	queue_work(port->wq, &port->disconnect_w);
+}
+
+int ghsic_data_connect(void *gptr, int port_num)
+{
+	struct gdata_port *port;
+	struct gserial *gser;
+	struct grmnet *gr;
+	struct usb_endpoint_descriptor *in_desc;
+	struct usb_endpoint_descriptor *out_desc;
+	unsigned long flags;
+	int ret = 0;
+
+	pr_debug("%s: port#%d\n", __func__, port_num);
+
+	if (port_num >= no_data_ports) {
+		pr_err("%s: invalid portno#%d\n", __func__, port_num);
+		return -ENODEV;
+	}
+
+	port = gdata_ports[port_num].port;
+
+	
if (!gptr || !port) { + pr_err("%s: port is null\n", __func__); + return -ENODEV; + } + + if (port->gtype == USB_GADGET_SERIAL) { + gser = gptr; + port->in = gser->in; + port->out = gser->out; + port->tx_q_size = ghsic_data_serial_tx_q_size; + port->rx_q_size = ghsic_data_serial_rx_q_size; + gser->in->driver_data = port; + gser->out->driver_data = port; + in_desc = gser->in_desc; + out_desc = gser->out_desc; + } else { + gr = gptr; + port->in = gr->in; + port->out = gr->out; + port->tx_q_size = ghsic_data_rmnet_tx_q_size; + port->rx_q_size = ghsic_data_rmnet_rx_q_size; + gr->in->driver_data = port; + gr->out->driver_data = port; + in_desc = gr->in_desc; + out_desc = gr->out_desc; + } + + ret = usb_ep_enable(port->in, in_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", + __func__, port->in); + goto fail; + } + + ret = usb_ep_enable(port->out, out_desc); + if (ret) { + pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", + __func__, port->out); + usb_ep_disable(port->in); + goto fail; + } + spin_lock_irqsave(&port->port_lock, flags); + port->port_usb = gptr; + port->to_host = 0; + port->to_modem = 0; + port->tomodem_drp_cnt = 0; + port->rx_throttled_cnt = 0; + port->rx_unthrottled_cnt = 0; + port->tx_throttled_cnt = 0; + port->tx_unthrottled_cnt = 0; + port->unthrottled_pnd_skbs = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + + queue_work(port->wq, &port->connect_w); +fail: + return ret; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t ghsic_data_read_stats(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + struct gdata_port *port; + struct platform_driver *pdrv; + char *buf; + unsigned long flags; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < no_data_ports; i++) { + port = gdata_ports[i].port; + if (!port) + continue; + pdrv = &gdata_ports[i].pdrv; + spin_lock_irqsave(&port->port_lock, flags); + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName: %s\n" + "#PORT:%d port#: %p\n" + "dpkts_to_usbhost: %lu\n" + "dpkts_to_modem: %lu\n" + "tomodem_drp_cnt: %u\n" + "tx_buf_len: %u\n" + "rx_buf_len: %u\n" + "rx thld cnt %u\n" + "rx unthld cnt %u\n" + "tx thld cnt %u\n" + "tx unthld cnt %u\n" + "uthld pnd skbs %u\n" + "RX_THROTTLED %d\n" + "TX_THROTTLED %d\n" + "data_ch_open: %d\n" + "data_ch_ready: %d\n", + pdrv->driver.name, + i, port, + port->to_host, port->to_modem, + port->tomodem_drp_cnt, + port->tx_skb_q.qlen, + port->rx_skb_q.qlen, + port->rx_throttled_cnt, + port->rx_unthrottled_cnt, + port->tx_throttled_cnt, + port->tx_unthrottled_cnt, + port->unthrottled_pnd_skbs, + test_bit(RX_THROTTLED, &port->brdg.flags), + test_bit(TX_THROTTLED, &port->brdg.flags), + test_bit(CH_OPENED, &port->bridge_sts), + test_bit(CH_READY, &port->bridge_sts)); + + spin_unlock_irqrestore(&port->port_lock, flags); + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t ghsic_data_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct gdata_port *port; + int i; + unsigned long flags; + + for (i = 0; i < no_data_ports; i++) { + port = gdata_ports[i].port; + if (!port) + continue; + + spin_lock_irqsave(&port->port_lock, flags); + port->to_host = 0; + port->to_modem = 0; + port->tomodem_drp_cnt = 0; + port->rx_throttled_cnt = 0; + port->rx_unthrottled_cnt = 0; + port->tx_throttled_cnt = 0; + port->tx_unthrottled_cnt 
= 0; + port->unthrottled_pnd_skbs = 0; + spin_unlock_irqrestore(&port->port_lock, flags); + } + return count; +} + +const struct file_operations ghsic_stats_ops = { + .read = ghsic_data_read_stats, + .write = ghsic_data_reset_stats, +}; + +static struct dentry *gdata_dent; +static struct dentry *gdata_dfile; + +static void ghsic_data_debugfs_init(void) +{ + gdata_dent = debugfs_create_dir("ghsic_data_xport", 0); + if (IS_ERR(gdata_dent)) + return; + + gdata_dfile = debugfs_create_file("status", 0444, gdata_dent, 0, + &ghsic_stats_ops); + if (!gdata_dfile || IS_ERR(gdata_dfile)) + debugfs_remove(gdata_dent); +} + +static void ghsic_data_debugfs_exit(void) +{ + debugfs_remove(gdata_dfile); + debugfs_remove(gdata_dent); +} + +#else +static void ghsic_data_debugfs_init(void) { } +static void ghsic_data_debugfs_exit(void) { } + +#endif + +int ghsic_data_setup(unsigned num_ports, enum gadget_type gtype) +{ + int first_port_id = no_data_ports; + int total_num_ports = num_ports + no_data_ports; + int ret = 0; + int i; + + if (!num_ports || total_num_ports > NUM_PORTS) { + pr_err("%s: Invalid num of ports count:%d\n", + __func__, num_ports); + return -EINVAL; + } + pr_debug("%s: count: %d\n", __func__, num_ports); + + for (i = first_port_id; i < (num_ports + first_port_id); i++) { + + /*probe can be called while port_alloc,so update no_data_ports*/ + no_data_ports++; + ret = ghsic_data_port_alloc(i, gtype); + if (ret) { + no_data_ports--; + pr_err("%s: Unable to alloc port:%d\n", __func__, i); + goto free_ports; + } + } + + /*return the starting index*/ + return first_port_id; + +free_ports: + for (i = first_port_id; i < no_data_ports; i++) + ghsic_data_port_free(i); + no_data_ports = first_port_id; + + return ret; +} + +static int __init ghsic_data_init(void) +{ + ghsic_data_debugfs_init(); + + return 0; +} +module_init(ghsic_data_init); + +static void __exit ghsic_data_exit(void) +{ + ghsic_data_debugfs_exit(); +} +module_exit(ghsic_data_exit); +MODULE_DESCRIPTION("hsic data xport driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index a3be709984c..ea77ce4f191 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -67,7 +67,12 @@ struct eth_dev { spinlock_t req_lock; /* guard {rx,tx}_reqs */ struct list_head tx_reqs, rx_reqs; - atomic_t tx_qlen; + unsigned tx_qlen; +/* Minimum number of TX USB request queued to UDC */ +#define TX_REQ_THRESHOLD 5 + int no_tx_req_used; + int tx_skb_hold_count; + u32 tx_req_bufsize; struct sk_buff_head rx_frames; @@ -340,8 +345,7 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req) DBG(dev, "rx %s reset\n", ep->name); defer_kevent(dev, WORK_RX_MEMORY); quiesce: - if (skb) - dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb); goto clean; /* data overrun */ @@ -466,6 +470,11 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) { struct sk_buff *skb = req->context; struct eth_dev *dev = ep->driver_data; + struct net_device *net = dev->net; + struct usb_request *new_req; + struct usb_ep *in; + int length; + int retval; switch (req->status) { default: @@ -476,16 +485,74 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req) case -ESHUTDOWN: /* disconnect etc */ break; case 0: - dev->net->stats.tx_bytes += skb->len; + if (!req->zero) + dev->net->stats.tx_bytes += req->length-1; + else + dev->net->stats.tx_bytes += req->length; } dev->net->stats.tx_packets++; spin_lock(&dev->req_lock); - list_add(&req->list, &dev->tx_reqs); - 
spin_unlock(&dev->req_lock); - dev_kfree_skb_any(skb); + list_add_tail(&req->list, &dev->tx_reqs); + + if (dev->port_usb->multi_pkt_xfer) { + dev->no_tx_req_used--; + req->length = 0; + in = dev->port_usb->in_ep; + + if (!list_empty(&dev->tx_reqs)) { + new_req = container_of(dev->tx_reqs.next, + struct usb_request, list); + list_del(&new_req->list); + spin_unlock(&dev->req_lock); + if (new_req->length > 0) { + length = new_req->length; + + /* NCM requires no zlp if transfer is + * dwNtbInMaxSize */ + if (dev->port_usb->is_fixed && + length == dev->port_usb->fixed_in_len && + (length % in->maxpacket) == 0) + new_req->zero = 0; + else + new_req->zero = 1; + + /* use zlp framing on tx for strict CDC-Ether + * conformance, though any robust network rx + * path ignores extra padding. and some hardware + * doesn't like to write zlps. + */ + if (new_req->zero && !dev->zlp && + (length % in->maxpacket) == 0) { + new_req->zero = 0; + length++; + } + + new_req->length = length; + retval = usb_ep_queue(in, new_req, GFP_ATOMIC); + switch (retval) { + default: + DBG(dev, "tx queue err %d\n", retval); + break; + case 0: + spin_lock(&dev->req_lock); + dev->no_tx_req_used++; + spin_unlock(&dev->req_lock); + net->trans_start = jiffies; + } + } else { + spin_lock(&dev->req_lock); + list_add(&new_req->list, &dev->tx_reqs); + spin_unlock(&dev->req_lock); + } + } else { + spin_unlock(&dev->req_lock); + } + } else { + spin_unlock(&dev->req_lock); + dev_kfree_skb_any(skb); + } - atomic_dec(&dev->tx_qlen); if (netif_carrier_ok(dev->net)) netif_wake_queue(dev->net); } @@ -495,6 +562,26 @@ static inline int is_promisc(u16 cdc_filter) return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; } +static void alloc_tx_buffer(struct eth_dev *dev) +{ + struct list_head *act; + struct usb_request *req; + + dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD * + (dev->net->mtu + + sizeof(struct ethhdr) + /* size of rndis_packet_msg_type */ + + 44 + + 22)); + + list_for_each(act, &dev->tx_reqs) { + req = container_of(act, struct usb_request, list); + if (!req->buf) + req->buf = kmalloc(dev->tx_req_bufsize, + GFP_ATOMIC); + } +} + static netdev_tx_t eth_start_xmit(struct sk_buff *skb, struct net_device *net) { @@ -521,6 +608,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + /* Allocate memory for tx_reqs to support multi packet transfer */ + if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize) + alloc_tx_buffer(dev); + /* apply outgoing CDC or RNDIS filters */ if (!is_promisc(cdc_filter)) { u8 *dest = skb->data; @@ -575,11 +666,39 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, spin_unlock_irqrestore(&dev->lock, flags); if (!skb) goto drop; + } + + spin_lock_irqsave(&dev->req_lock, flags); + dev->tx_skb_hold_count++; + spin_unlock_irqrestore(&dev->req_lock, flags); + if (dev->port_usb->multi_pkt_xfer) { + memcpy(req->buf + req->length, skb->data, skb->len); + req->length = req->length + skb->len; + length = req->length; + dev_kfree_skb_any(skb); + + spin_lock_irqsave(&dev->req_lock, flags); + if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) { + if (dev->no_tx_req_used > TX_REQ_THRESHOLD) { + list_add(&req->list, &dev->tx_reqs); + spin_unlock_irqrestore(&dev->req_lock, flags); + goto success; + } + } + + dev->no_tx_req_used++; + spin_unlock_irqrestore(&dev->req_lock, flags); + + spin_lock_irqsave(&dev->lock, flags); + dev->tx_skb_hold_count = 0; + spin_unlock_irqrestore(&dev->lock, flags); + } else { length = skb->len; + req->buf = skb->data; + req->context = skb; } - req->buf = 
skb->data; - req->context = skb; + req->complete = tx_complete; /* NCM requires no zlp if transfer is dwNtbInMaxSize */ @@ -594,16 +713,26 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, * though any robust network rx path ignores extra padding. * and some hardware doesn't like to write zlps. */ - if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) + if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) { + req->zero = 0; length++; + } req->length = length; /* throttle highspeed IRQ rate back slightly */ - if (gadget_is_dualspeed(dev->gadget)) - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH) - ? ((atomic_read(&dev->tx_qlen) % qmult) != 0) - : 0; + if (gadget_is_dualspeed(dev->gadget) && + (dev->gadget->speed == USB_SPEED_HIGH)) { + dev->tx_qlen++; + if (dev->tx_qlen == (qmult/2)) { + req->no_interrupt = 0; + dev->tx_qlen = 0; + } else { + req->no_interrupt = 1; + } + } else { + req->no_interrupt = 0; + } retval = usb_ep_queue(in, req, GFP_ATOMIC); switch (retval) { @@ -612,11 +741,11 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, break; case 0: net->trans_start = jiffies; - atomic_inc(&dev->tx_qlen); } if (retval) { - dev_kfree_skb_any(skb); + if (!dev->port_usb->multi_pkt_xfer) + dev_kfree_skb_any(skb); drop: dev->net->stats.tx_dropped++; spin_lock_irqsave(&dev->req_lock, flags); @@ -625,6 +754,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, list_add(&req->list, &dev->tx_reqs); spin_unlock_irqrestore(&dev->req_lock, flags); } +success: return NETDEV_TX_OK; } @@ -638,7 +768,7 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) rx_fill(dev, gfp_flags); /* and open the tx floodgates */ - atomic_set(&dev->tx_qlen, 0); + dev->tx_qlen = 0; netif_wake_queue(dev->net); } @@ -693,7 +823,7 @@ static int eth_stop(struct net_device *net) usb_ep_disable(link->in_ep); usb_ep_disable(link->out_ep); if (netif_carrier_ok(net)) { - ERROR(dev, "host still using in/out endpoints\n"); + DBG(dev, "host still using in/out endpoints\n"); usb_ep_enable(link->in_ep, link->in); usb_ep_enable(link->out_ep, link->out); } @@ -790,10 +920,8 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], struct net_device *net; int status; - if (the_dev) { - memcpy(ethaddr, the_dev->host_mac, ETH_ALEN); - return 0; - } + if (the_dev) + return -EBUSY; net = alloc_etherdev(sizeof *dev); if (!net) @@ -826,12 +954,6 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], SET_ETHTOOL_OPS(net, &ops); - /* two kinds of host-initiated state changes: - * - iff DATA transfer is active, carrier is "on" - * - tx queueing enabled if open *and* carrier is "on" - */ - netif_carrier_off(net); - dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); SET_NETDEV_DEVTYPE(net, &gadget_type); @@ -845,6 +967,12 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], INFO(dev, "HOST MAC %pM\n", dev->host_mac); the_dev = dev; + + /* two kinds of host-initiated state changes: + * - iff DATA transfer is active, carrier is "on" + * - tx queueing enabled if open *and* carrier is "on" + */ + netif_carrier_off(net); } return status; @@ -921,6 +1049,9 @@ struct net_device *gether_connect(struct gether *link) dev->wrap = link->wrap; spin_lock(&dev->lock); + dev->tx_skb_hold_count = 0; + dev->no_tx_req_used = 0; + dev->tx_req_bufsize = 0; dev->port_usb = link; link->ioport = dev; if (netif_running(dev->net)) { @@ -986,6 +1117,8 @@ void gether_disconnect(struct gether *link) list_del(&req->list); spin_unlock(&dev->req_lock); + if (link->multi_pkt_xfer) + 
kfree(req->buf); usb_ep_free_request(link->in_ep, req); spin_lock(&dev->req_lock); } diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h index 64b65f92168..09b6cccbac7 100644 --- a/drivers/usb/gadget/u_ether.h +++ b/drivers/usb/gadget/u_ether.h @@ -66,6 +66,9 @@ struct gether { bool is_fixed; u32 fixed_out_len; u32 fixed_in_len; +/* Max number of SKB packets to be used to create Multi Packet RNDIS */ +#define TX_SKB_HOLD_THRESHOLD 3 + bool multi_pkt_xfer; struct sk_buff *(*wrap)(struct gether *port, struct sk_buff *skb); int (*unwrap)(struct gether *port, diff --git a/drivers/usb/gadget/u_rmnet.h b/drivers/usb/gadget/u_rmnet.h index 3c21316d71c..fd1e124fa89 100644 --- a/drivers/usb/gadget/u_rmnet.h +++ b/drivers/usb/gadget/u_rmnet.h @@ -35,28 +35,23 @@ struct grmnet { /* to usb host, aka laptop, windows pc etc. Will * be filled by usb driver of rmnet functionality */ - int (*send_cpkt_response)(struct grmnet *g, - struct rmnet_ctrl_pkt *pkt); + int (*send_cpkt_response)(void *g, void *buf, size_t len); /* to modem, and to be filled by driver implementing * control function */ - int (*send_cpkt_request)(struct grmnet *g, - u8 port_num, - struct rmnet_ctrl_pkt *pkt); + int (*send_encap_cmd)(u8 port_num, void *buf, size_t len); - void (*send_cbits_tomodem)(struct grmnet *g, - u8 port_num, - int cbits); + void (*notify_modem)(void *g, u8 port_num, int cbits); void (*disconnect)(struct grmnet *g); void (*connect)(struct grmnet *g); }; -int gbam_setup(unsigned int count); -int gbam_connect(struct grmnet *, u8 port_num); -void gbam_disconnect(struct grmnet *, u8 port_num); - +int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port); +int gbam_connect(struct grmnet *gr, u8 port_num, + enum transport_type trans, u8 connection_idx); +void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans); int gsmd_ctrl_connect(struct grmnet *gr, int port_num); void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num); int gsmd_ctrl_setup(unsigned int count); diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c index 68b7bb8a547..d42ac935029 100644 --- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c +++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -59,7 +59,7 @@ struct rmnet_ctrl_port { struct grmnet *port_usb; spinlock_t port_lock; - struct work_struct connect_w; + struct delayed_work connect_w; }; static struct rmnet_ctrl_ports { @@ -83,6 +83,7 @@ static struct rmnet_ctrl_pkt *alloc_rmnet_ctrl_pkt(unsigned len, gfp_t flags) kfree(pkt); return ERR_PTR(-ENOMEM); } + pkt->len = len; return pkt; @@ -103,35 +104,37 @@ static void grmnet_ctrl_smd_read_w(struct work_struct *w) struct smd_ch_info *c = container_of(w, struct smd_ch_info, read_w); struct rmnet_ctrl_port *port = c->port; int sz; - struct rmnet_ctrl_pkt *cpkt; + size_t len; + void *buf; unsigned long flags; - while (1) { + spin_lock_irqsave(&port->port_lock, flags); + while (c->ch) { sz = smd_cur_packet_size(c->ch); - if (sz == 0) + if (sz <= 0) break; if (smd_read_avail(c->ch) < sz) break; - cpkt = alloc_rmnet_ctrl_pkt(sz, GFP_KERNEL); - if (IS_ERR(cpkt)) { - pr_err("%s: unable to allocate rmnet control pkt\n", - __func__); + spin_unlock_irqrestore(&port->port_lock, flags); + + buf = kmalloc(sz, GFP_KERNEL); + if (!buf) return; - } - cpkt->len = smd_read(c->ch, cpkt->buf, sz); + + len = smd_read(c->ch, buf, sz); /* send it to USB here */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb && port->port_usb->send_cpkt_response) { - port->port_usb->send_cpkt_response( - port->port_usb, - cpkt); + port->port_usb->send_cpkt_response(port->port_usb, + buf, len); c->to_host++; } - spin_unlock_irqrestore(&port->port_lock, flags); + kfree(buf); } + spin_unlock_irqrestore(&port->port_lock, flags); } static void grmnet_ctrl_smd_write_w(struct work_struct *w) @@ -143,7 +146,7 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) int ret; spin_lock_irqsave(&port->port_lock, flags); - while (1) { + while (c->ch) { if (list_empty(&c->tx_q)) break; @@ -157,8 +160,7 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) ret = smd_write(c->ch, cpkt->buf, cpkt->len); spin_lock_irqsave(&port->port_lock, flags); if (ret != cpkt->len) { - pr_err("%s: smd_write failed err:%d\n", - __func__, ret); + pr_err("%s: smd_write failed err:%d\n", __func__, ret); free_rmnet_ctrl_pkt(cpkt); break; } @@ -169,24 +171,29 @@ static void grmnet_ctrl_smd_write_w(struct work_struct *w) } static int -grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno, - struct rmnet_ctrl_pkt *cpkt) +grmnet_ctrl_smd_send_cpkt_tomodem(u8 portno, + void *buf, size_t len) { unsigned long flags; struct rmnet_ctrl_port *port; struct smd_ch_info *c; + struct rmnet_ctrl_pkt *cpkt; if (portno >= n_rmnet_ctrl_ports) { pr_err("%s: Invalid portno#%d\n", __func__, portno); return -ENODEV; } - if (!gr) { - pr_err("%s: grmnet is null\n", __func__); - return -ENODEV; + port = ctrl_smd_ports[portno].port; + + cpkt = alloc_rmnet_ctrl_pkt(len, GFP_ATOMIC); + if (IS_ERR(cpkt)) { + pr_err("%s: Unable to allocate ctrl pkt\n", __func__); + return -ENOMEM; } - port = ctrl_smd_ports[portno].port; + memcpy(cpkt->buf, buf, len); + cpkt->len = len; spin_lock_irqsave(&port->port_lock, flags); c = &port->ctrl_ch; @@ -207,7 +214,7 @@ grmnet_ctrl_smd_send_cpkt_tomodem(struct grmnet *gr, u8 portno, #define RMNET_CTRL_DTR 0x01 static void -gsmd_ctrl_send_cbits_tomodem(struct grmnet *gr, u8 portno, int cbits) +gsmd_ctrl_send_cbits_tomodem(void *gptr, u8 portno, int cbits) { struct rmnet_ctrl_port *port; struct smd_ch_info *c; @@ -220,7 +227,7 @@ gsmd_ctrl_send_cbits_tomodem(struct 
grmnet *gr, u8 portno, int cbits) return; } - if (!gr) { + if (!gptr) { pr_err("%s: grmnet is null\n", __func__); return; } @@ -316,7 +323,7 @@ static void grmnet_ctrl_smd_notify(void *p, unsigned event) static void grmnet_ctrl_smd_connect_w(struct work_struct *w) { struct rmnet_ctrl_port *port = - container_of(w, struct rmnet_ctrl_port, connect_w); + container_of(w, struct rmnet_ctrl_port, connect_w.work); struct smd_ch_info *c = &port->ctrl_ch; unsigned long flags; int ret; @@ -328,17 +335,22 @@ static void grmnet_ctrl_smd_connect_w(struct work_struct *w) ret = smd_open(c->name, &c->ch, port, grmnet_ctrl_smd_notify); if (ret) { - pr_err("%s: Unable to open smd ch:%s err:%d\n", - __func__, c->name, ret); + if (ret == -EAGAIN) { + /* port not ready - retry */ + pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", + __func__, c->name, ret); + queue_delayed_work(grmnet_ctrl_wq, &port->connect_w, + msecs_to_jiffies(250)); + } else { + pr_err("%s: unable to open smd port:%s err:%d\n", + __func__, c->name, ret); + } return; } spin_lock_irqsave(&port->port_lock, flags); - if (port->port_usb) { - pr_warning("%s: SMD notify closing before open\n", __func__); - c->cbits_tomodem |= TIOCM_RTS; + if (port->port_usb) smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem); - } spin_unlock_irqrestore(&port->port_lock, flags); } @@ -365,11 +377,11 @@ int gsmd_ctrl_connect(struct grmnet *gr, int port_num) spin_lock_irqsave(&port->port_lock, flags); port->port_usb = gr; - gr->send_cpkt_request = grmnet_ctrl_smd_send_cpkt_tomodem; - gr->send_cbits_tomodem = gsmd_ctrl_send_cbits_tomodem; + gr->send_encap_cmd = grmnet_ctrl_smd_send_cpkt_tomodem; + gr->notify_modem = gsmd_ctrl_send_cbits_tomodem; spin_unlock_irqrestore(&port->port_lock, flags); - queue_work(grmnet_ctrl_wq, &port->connect_w); + queue_delayed_work(grmnet_ctrl_wq, &port->connect_w, 0); return 0; } @@ -398,8 +410,8 @@ void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num) spin_lock_irqsave(&port->port_lock, flags); port->port_usb = 0; - gr->send_cpkt_request = 0; - gr->send_cbits_tomodem = 0; + gr->send_encap_cmd = 0; + gr->notify_modem = 0; c->cbits_tomodem = 0; while (!list_empty(&c->tx_q)) { @@ -411,10 +423,13 @@ void gsmd_ctrl_disconnect(struct grmnet *gr, u8 port_num) spin_unlock_irqrestore(&port->port_lock, flags); - if (test_bit(CH_OPENED, &c->flags)) { - /* this should send the dtr zero */ + if (test_and_clear_bit(CH_OPENED, &c->flags)) + /* send dtr zero */ + smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem); + + if (c->ch) { smd_close(c->ch); - clear_bit(CH_OPENED, &c->flags); + c->ch = NULL; } } @@ -438,7 +453,8 @@ static int grmnet_ctrl_smd_ch_probe(struct platform_device *pdev) /* if usb is online, try opening smd_ch */ spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) - queue_work(grmnet_ctrl_wq, &port->connect_w); + queue_delayed_work(grmnet_ctrl_wq, + &port->connect_w, 0); spin_unlock_irqrestore(&port->port_lock, flags); break; @@ -463,7 +479,10 @@ static int grmnet_ctrl_smd_ch_remove(struct platform_device *pdev) if (!strncmp(c->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &c->flags); clear_bit(CH_OPENED, &c->flags); - smd_close(c->ch); + if (c->ch) { + smd_close(c->ch); + c->ch = NULL; + } break; } } @@ -496,7 +515,7 @@ static int grmnet_ctrl_smd_port_alloc(int portno) port->port_num = portno; spin_lock_init(&port->port_lock); - INIT_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w); + INIT_DELAYED_WORK(&port->connect_w, grmnet_ctrl_smd_connect_w); c = &port->ctrl_ch; c->name = 
rmnet_ctrl_names[portno]; @@ -543,12 +562,13 @@ int gsmd_ctrl_setup(unsigned int count) } for (i = 0; i < count; i++) { + n_rmnet_ctrl_ports++; ret = grmnet_ctrl_smd_port_alloc(i); if (ret) { pr_err("%s: Unable to alloc port:%d\n", __func__, i); + n_rmnet_ctrl_ports--; goto free_ctrl_smd_ports; } - n_rmnet_ctrl_ports++; } return 0; @@ -601,8 +621,8 @@ static ssize_t gsmd_ctrl_read_stats(struct file *file, char __user *ubuf, c->cbits_tomodem ? "HIGH" : "LOW", test_bit(CH_OPENED, &c->flags), test_bit(CH_READY, &c->flags), - smd_read_avail(c->ch), - smd_write_avail(c->ch)); + c->ch ? smd_read_avail(c->ch) : 0, + c->ch ? smd_write_avail(c->ch) : 0); spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/drivers/usb/gadget/u_sdio.c b/drivers/usb/gadget/u_sdio.c index 056920aeaf4..8ff4c4bd461 100644 --- a/drivers/usb/gadget/u_sdio.c +++ b/drivers/usb/gadget/u_sdio.c @@ -232,7 +232,7 @@ int gsdio_write(struct gsdio_port *port, struct usb_request *req) { unsigned avail; char *packet; - unsigned size; + unsigned size = req->actual; unsigned n; int ret = 0; @@ -248,8 +248,6 @@ int gsdio_write(struct gsdio_port *port, struct usb_request *req) return -ENODEV; } - size = req->actual; - packet = req->buf; pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n", __func__, port, port->port_num, req, req->actual, port->n_read); @@ -536,20 +534,6 @@ void gsdio_tx_pull(struct work_struct *w) goto tx_pull_end; } - /* Do not send data if DTR is not set */ - if (!(port->cbits_to_modem & TIOCM_DTR)) { - pr_info("%s: DTR low. flush %d bytes.", __func__, avail); - /* check if usb is still active */ - if (!port->port_usb) { - gsdio_free_req(in, req); - } else { - list_add(&req->list, pool); - port->wp_len++; - } - goto tx_pull_end; - } - - req->length = avail; spin_unlock_irq(&port->port_lock); @@ -655,10 +639,11 @@ void gsdio_ctrl_wq(struct work_struct *w) port->cbits_to_modem, ~(port->cbits_to_modem)); } -void gsdio_ctrl_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits) +void gsdio_ctrl_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsdio_port *port; int temp; + struct gserial *gser = gptr; if (portno >= n_sdio_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); @@ -1146,8 +1131,8 @@ int gsdio_setup(struct usb_gadget *g, unsigned count) for (i = 0; i < count; i++) { mutex_init(&sdio_ports[i].lock); - n_sdio_ports++; ret = gsdio_port_alloc(i, &coding, sport_info + i); + n_sdio_ports++; if (ret) { n_sdio_ports--; pr_err("%s: sdio logical port allocation failed\n", diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index bde6a3f322b..c9da4bd78c0 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c @@ -55,7 +55,7 @@ * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. 
*/ -#define PREFIX "ttyHSUSB" +#define PREFIX "ttyGS" /* * gserial is the lifecycle interface, used by USB functions @@ -371,16 +371,11 @@ __acquires(&port->port_lock) */ { struct list_head *pool = &port->write_pool; - struct usb_ep *in; + struct usb_ep *in = port->port_usb->in; int status = 0; static long prev_len; bool do_tty_wake = false; - if (port->port_usb) - in = port->port_usb->in; - else - return 0; - while (!list_empty(pool)) { struct usb_request *req; int len; @@ -391,7 +386,9 @@ __acquires(&port->port_lock) req = list_entry(pool->next, struct usb_request, list); len = gs_send_packet(port, req->buf, TX_BUF_SIZE); if (len == 0) { - /* Queue zero length packet */ + /* Queue zero length packet explicitly to make it + * work with UDCs which don't support req->zero flag + */ if (prev_len && (prev_len % in->maxpacket == 0)) { req->length = 0; list_del(&req->list); @@ -416,7 +413,6 @@ __acquires(&port->port_lock) req->length = len; list_del(&req->list); - req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", port->port_num, len, *((u8 *)req->buf), @@ -468,14 +464,9 @@ __acquires(&port->port_lock) */ { struct list_head *pool = &port->read_pool; - struct usb_ep *out; + struct usb_ep *out = port->port_usb->out; unsigned started = 0; - if (port->port_usb) - out = port->port_usb->out; - else - return 0; - while (!list_empty(pool)) { struct usb_request *req; int status; @@ -736,15 +727,10 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, static int gs_start_io(struct gs_port *port) { struct list_head *head = &port->read_pool; - struct usb_ep *ep; + struct usb_ep *ep = port->port_usb->out; int status; unsigned started; - if (port->port_usb) - ep = port->port_usb->out; - else - return -EIO; - /* Allocate RX and TX I/O buffers. We can't easily do this much * earlier (with GFP_KERNEL) because the requests are coupled to * endpoints, as are the packet sizes we'll be using. 
Different @@ -1361,9 +1347,6 @@ int gserial_setup(struct usb_gadget *g, unsigned count) if (count == 0 || count > N_PORTS) return -EINVAL; - if (gs_tty_driver) - return 0; - gs_tty_driver = alloc_tty_driver(count); if (!gs_tty_driver) return -ENOMEM; @@ -1388,10 +1371,6 @@ int gserial_setup(struct usb_gadget *g, unsigned count) gs_tty_driver->init_termios.c_ispeed = 9600; gs_tty_driver->init_termios.c_ospeed = 9600; - gs_tty_driver->init_termios.c_lflag = 0; - gs_tty_driver->init_termios.c_iflag = 0; - gs_tty_driver->init_termios.c_oflag = 0; - coding.dwDTERate = cpu_to_le32(9600); coding.bCharFormat = 8; coding.bParityType = USB_CDC_NO_PARITY; diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h index fea53d8cee5..c9370068a7a 100644 --- a/drivers/usb/gadget/u_serial.h +++ b/drivers/usb/gadget/u_serial.h @@ -55,7 +55,7 @@ struct gserial { int (*send_modem_ctrl_bits)(struct gserial *p, int ctrl_bits); /* notification changes to modem */ - void (*notify_modem)(struct gserial *gser, u8 portno, int ctrl_bits); + void (*notify_modem)(void *gser, u8 portno, int ctrl_bits); }; /* utilities to allocate/free request and buffer */ diff --git a/drivers/usb/gadget/u_smd.c b/drivers/usb/gadget/u_smd.c index fe55363a8c1..3de82b37fe6 100644 --- a/drivers/usb/gadget/u_smd.c +++ b/drivers/usb/gadget/u_smd.c @@ -71,7 +71,7 @@ struct gsmd_port { struct gserial *port_usb; struct smd_port_info *pi; - struct work_struct connect_work; + struct delayed_work connect_work; /* At present, smd does not notify * control bit change info from modem @@ -209,6 +209,7 @@ static void gsmd_start_rx(struct gsmd_port *port) static void gsmd_rx_push(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, push); + struct smd_port_info *pi = port->pi; struct list_head *q; pr_debug("%s: port:%p port#%d", __func__, port, port->port_num); @@ -216,10 +217,9 @@ static void gsmd_rx_push(struct work_struct *w) spin_lock_irq(&port->port_lock); q = &port->read_queue; - while (!list_empty(q)) { + while (pi->ch && !list_empty(q)) { struct usb_request *req; int avail; - struct smd_port_info *pi = port->pi; req = list_first_entry(q, struct usb_request, list); @@ -245,7 +245,7 @@ static void gsmd_rx_push(struct work_struct *w) char *packet = req->buf; unsigned size = req->actual; unsigned n; - int count; + int count; n = port->n_read; if (n) { @@ -296,21 +296,24 @@ static void gsmd_tx_pull(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, pull); struct list_head *pool = &port->write_pool; + struct smd_port_info *pi = port->pi; + struct usb_ep *in; pr_debug("%s: port:%p port#%d pool:%p\n", __func__, port, port->port_num, pool); + spin_lock_irq(&port->port_lock); + if (!port->port_usb) { pr_debug("%s: usb is disconnected\n", __func__); + spin_unlock_irq(&port->port_lock); gsmd_read_pending(port); return; } - spin_lock_irq(&port->port_lock); - while (!list_empty(pool)) { + in = port->port_usb->in; + while (pi->ch && !list_empty(pool)) { struct usb_request *req; - struct usb_ep *in = port->port_usb->in; - struct smd_port_info *pi = port->pi; int avail; int ret; @@ -562,7 +565,7 @@ static void gsmd_connect_work(struct work_struct *w) struct smd_port_info *pi; int ret; - port = container_of(w, struct gsmd_port, connect_work); + port = container_of(w, struct gsmd_port, connect_work.work); pi = port->pi; pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num); @@ -573,16 +576,24 @@ static void gsmd_connect_work(struct work_struct *w) ret = 
smd_named_open_on_edge(pi->name, SMD_APPS_MODEM, &pi->ch, port, gsmd_notify); if (ret) { - pr_err("%s: unable to open smd port:%s err:%d\n", - __func__, pi->name, ret); - return; + if (ret == -EAGAIN) { + /* port not ready - retry */ + pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", + __func__, pi->name, ret); + queue_delayed_work(gsmd_wq, &port->connect_work, + msecs_to_jiffies(250)); + } else { + pr_err("%s: unable to open smd port:%s err:%d\n", + __func__, pi->name, ret); + } } } -static void gsmd_notify_modem(struct gserial *gser, u8 portno, int ctrl_bits) +static void gsmd_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsmd_port *port; int temp; + struct gserial *gser = gptr; if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); @@ -671,7 +682,7 @@ int gsmd_connect(struct gserial *gser, u8 portno) } gser->out->driver_data = port; - queue_work(gsmd_wq, &port->connect_work); + queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); return 0; } @@ -710,17 +721,18 @@ void gsmd_disconnect(struct gserial *gser, u8 portno) port->n_read = 0; spin_unlock_irqrestore(&port->port_lock, flags); - if (!test_bit(CH_OPENED, &port->pi->flags)) - return; - - /* lower the dtr */ - port->cbits_to_modem = 0; - smd_tiocmset(port->pi->ch, - port->cbits_to_modem, - ~port->cbits_to_modem); + if (test_and_clear_bit(CH_OPENED, &port->pi->flags)) { + /* lower the dtr */ + port->cbits_to_modem = 0; + smd_tiocmset(port->pi->ch, + port->cbits_to_modem, + ~port->cbits_to_modem); + } - smd_close(port->pi->ch); - clear_bit(CH_OPENED, &port->pi->flags); + if (port->pi->ch) { + smd_close(port->pi->ch); + port->pi->ch = NULL; + } } #define SMD_CH_MAX_LEN 20 @@ -741,7 +753,8 @@ static int gsmd_ch_probe(struct platform_device *pdev) set_bit(CH_READY, &pi->flags); spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) - queue_work(gsmd_wq, &port->connect_work); + queue_delayed_work(gsmd_wq, &port->connect_work, + msecs_to_jiffies(0)); spin_unlock_irqrestore(&port->port_lock, flags); break; } @@ -764,7 +777,10 @@ static int gsmd_ch_remove(struct platform_device *pdev) if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &pi->flags); clear_bit(CH_OPENED, &pi->flags); - smd_close(pi->ch); + if (pi->ch) { + smd_close(pi->ch); + pi->ch = NULL; + } break; } } @@ -800,7 +816,7 @@ static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding) INIT_LIST_HEAD(&port->write_pool); INIT_WORK(&port->pull, gsmd_tx_pull); - INIT_WORK(&port->connect_work, gsmd_connect_work); + INIT_DELAYED_WORK(&port->connect_work, gsmd_connect_work); smd_ports[portno].port = port; pdrv = &smd_ports[portno].pdrv; @@ -820,6 +836,7 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct gsmd_port *port; + struct smd_port_info *pi; char *buf; unsigned long flags; int temp = 0; @@ -832,6 +849,7 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; + pi = port->pi; spin_lock_irqsave(&port->port_lock, flags); temp += scnprintf(buf + temp, 512 - temp, "###PORT:%d###\n" @@ -847,10 +865,10 @@ static ssize_t debug_smd_read_stats(struct file *file, char __user *ubuf, i, port->nbytes_tolaptop, port->nbytes_tomodem, port->cbits_to_modem, port->cbits_to_laptop, port->n_read, - smd_read_avail(port->pi->ch), - smd_write_avail(port->pi->ch), - test_bit(CH_OPENED, &port->pi->flags), - test_bit(CH_READY, &port->pi->flags)); + 
pi->ch ? smd_read_avail(pi->ch) : 0, + pi->ch ? smd_write_avail(pi->ch) : 0, + test_bit(CH_OPENED, &pi->flags), + test_bit(CH_READY, &pi->flags)); spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/drivers/usb/gadget/u_xpst.c b/drivers/usb/gadget/u_xpst.c index 7df4ab6eeb9..66027741f1d 100644 --- a/drivers/usb/gadget/u_xpst.c +++ b/drivers/usb/gadget/u_xpst.c @@ -11,6 +11,9 @@ * GNU General Public License for more details. * */ +#if defined(CONFIG_MACH_MECHA) +#include +#endif struct diag_context _context; static struct usb_diag_ch *legacych; @@ -367,7 +370,7 @@ static long htc_diag_ioctl(struct file *file, unsigned int cmd, unsigned long ar diag_smd_enable(driver->ch, "diag_ioctl", tmp_value); #if defined(CONFIG_MACH_MECHA) /* internal hub*/ - /*smsc251x_mdm_port_sw(tmp_value);*/ + smsc251x_mdm_port_sw(tmp_value); #endif /* force diag_read to return error when disable diag */ if (tmp_value == 0) diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h index 5b7919460fd..01a23c1197f 100644 --- a/drivers/usb/gadget/uvc.h +++ b/drivers/usb/gadget/uvc.h @@ -29,7 +29,7 @@ struct uvc_request_data { - unsigned int length; + __s32 length; __u8 data[60]; }; diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index 5e807f083bc..992f66b88c8 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c @@ -41,7 +41,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) if (data->length < 0) return usb_ep_set_halt(cdev->gadget->ep0); - req->length = min(uvc->event_length, data->length); + req->length = min_t(unsigned int, uvc->event_length, data->length); req->zero = data->length < uvc->event_length; req->dma = DMA_ADDR_INVALID; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index f380bf97e5a..fc93d57609a 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver, */ if (pdata->init && pdata->init(pdev)) { retval = -ENODEV; - goto err3; + goto err4; } /* Enable USB controller, 83xx or 8536 */ @@ -216,6 +216,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, unsigned int port_offset) { u32 portsc; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + void __iomem *non_ehci = hcd->regs; portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW); @@ -231,6 +233,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, portsc |= PORT_PTS_PTW; /* fall through */ case FSL_USB2_PHY_UTMI: + /* enable UTMI PHY */ + setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN); portsc |= PORT_PTS_UTMI; break; case FSL_USB2_PHY_NONE: diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h index 49180622116..bea5013ab7f 100644 --- a/drivers/usb/host/ehci-fsl.h +++ b/drivers/usb/host/ehci-fsl.h @@ -45,5 +45,6 @@ #define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */ #define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */ #define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */ +#define CTRL_UTMI_PHY_EN (1<<9) #define SNOOP_SIZE_2GB 0x1e #endif /* _EHCI_FSL_H */ diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index dc11eaf2913..862514f2c6a 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -792,8 +792,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) goto dead; } + /* + * We don't use STS_FLR, but some controllers don't like it to + * remain on, so mask it out along with the other status bits. 
+ */ + masked_status = status & (INTR_MASK | STS_FLR); + /* Shared IRQ? */ - masked_status = status & INTR_MASK; if (!masked_status || unlikely(hcd->state == HC_STATE_HALT)) { spin_unlock(&ehci->lock); return IRQ_NONE; @@ -844,7 +849,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) pcd_status = status; /* resume root hub? */ - if (!(cmd & CMD_RUN)) + if (hcd->state == HC_STATE_SUSPENDED) usb_hcd_resume_root_hub(hcd); /* get per-port change detect bits */ diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 55a57c23dd0..028c57212fe 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -321,7 +321,7 @@ static const struct hc_driver ehci_omap_hc_driver = { .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; -MODULE_ALIAS("platform:omap-ehci"); +MODULE_ALIAS("platform:ehci-omap"); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_AUTHOR("Felipe Balbi "); diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 1d1caa6a33f..175c574ec9f 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -357,7 +357,10 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) { return pdev->class == PCI_CLASS_SERIAL_USB_EHCI && pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == 0x1E26; + (pdev->device == 0x1E26 || + pdev->device == 0x8C2D || + pdev->device == 0x8C26 || + pdev->device == 0x9C26); } static void ehci_enable_xhci_companion(void) diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 9c24ff441d5..458cddbdcba 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -130,9 +130,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) else { qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list); - /* first qtd may already be partially processed */ - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. 
+ */ + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { + qh->hw->hw_qtd_next = qtd->hw_next; qtd = NULL; + } } if (qtd) @@ -649,7 +657,7 @@ qh_urb_transaction ( /* * data transfer stage: buffer setup */ - i = urb->num_sgs; + i = urb->num_mapped_sgs; if (len > 0 && i > 0) { sg = urb->sg; buf = sg_dma_address(sg); diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 2542ca013a2..bd727626e23 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -215,7 +215,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index f9cf3f04b74..23107e23053 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd) struct ohci_hcd *ohci; ohci = hcd_to_ohci (hcd); - ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); - ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); + ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable); - /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ - ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? - OHCI_CTRL_RWC | OHCI_CTRL_HCFS : - OHCI_CTRL_RWC); - ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); + /* Software reset, after which the controller goes into SUSPEND */ + ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus); + ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */ + udelay(10); - /* flush the writes */ - (void) ohci_readl (ohci, &ohci->regs->control); + ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval); } static int check_ed(struct ohci_hcd *ohci, struct ed *ed) diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index ad8166c681e..bc01b064585 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } -/* nVidia controllers continue to drive Reset signalling on the bus - * even after system shutdown, wasting power. This flag tells the - * shutdown routine to leave the controller OPERATIONAL instead of RESET. - */ -static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) -{ - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - struct ohci_hcd *ohci = hcd_to_ohci(hcd); - - /* Evidently nVidia fixed their later hardware; this is a guess at - * the changeover point. - */ -#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d - - if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { - ohci->flags |= OHCI_QUIRK_SHUTDOWN; - ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); - } - - return 0; -} - static void sb800_prefetch(struct ohci_hcd *ohci, int on) { struct pci_dev *pdev; @@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = { PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), .driver_data = (unsigned long)ohci_quirk_amd700, }, - { - PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), - .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown, - }, /* FIXME for some of the early AMD 760 southbridges, OHCI * won't work at all. blacklist them. 
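The scatter-gather hunks in this patch (ehci-q.c above, plus the uhci-q.c, whci/qset.c and xhci-ring.c hunks further on) all switch the submission paths from urb->num_sgs to urb->num_mapped_sgs: dma_map_sg() is allowed to coalesce adjacent entries, so after mapping only the first num_mapped_sgs entries carry valid DMA addresses and lengths. A minimal sketch of the intended iteration, with queue_mapped_sg() as a made-up helper name rather than anything taken from the patch:

#include <linux/scatterlist.h>
#include <linux/usb.h>

/*
 * Illustrative only: walk the scatterlist entries that actually came back
 * from DMA mapping.  dma_map_sg() may merge adjacent entries, so
 * urb->num_mapped_sgs can be smaller than urb->num_sgs; iterating over
 * num_sgs would read DMA addresses/lengths past the mapped portion.
 */
static void queue_mapped_sg(struct urb *urb)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* hand addr/len to the controller-specific TD/TRB builder */
		(void)addr;
		(void)len;
	}
}

The whci/qset.c hunks in this patch already use exactly this for_each_sg() form; the ehci, uhci and xhci hunks only change the loop bound.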
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index dd24fc115e4..e66eb299a27 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -1130,6 +1130,25 @@ dl_done_list (struct ohci_hcd *ohci) while (td) { struct td *td_next = td->next_dl_td; + struct ed *ed = td->ed; + + /* + * Some OHCI controllers (NVIDIA for sure, maybe others) + * occasionally forget to add TDs to the done queue. Since + * TDs for a given endpoint are always processed in order, + * if we find a TD on the donelist then all of its + * predecessors must be finished as well. + */ + for (;;) { + struct td *td2; + + td2 = list_first_entry(&ed->td_list, struct td, + td_list); + if (td2 == td) + break; + takeback_td(ohci, td2); + } + takeback_td(ohci, td); td = td_next; } diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 35e5fd640ce..0795b934d00 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -403,7 +403,6 @@ struct ohci_hcd { #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ -#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */ // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a495d489918..0f097d366eb 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -36,6 +36,7 @@ #define OHCI_INTRENABLE 0x10 #define OHCI_INTRDISABLE 0x14 #define OHCI_FMINTERVAL 0x34 +#define OHCI_HCFS (3 << 6) /* hc functional state */ #define OHCI_HCR (1 << 0) /* host controller reset */ #define OHCI_OCR (1 << 3) /* ownership change request */ #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ @@ -72,7 +73,9 @@ #define NB_PIF0_PWRDOWN_1 0x01100013 #define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 #define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC static struct amd_chipset_info { struct pci_dev *nb_dev; @@ -465,6 +468,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) { void __iomem *base; u32 control; + u32 fminterval; + int cnt; if (!mmio_resource_enabled(pdev, 0)) return; @@ -497,41 +502,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) } #endif - /* reset controller, preserving RWC (and possibly IR) */ - writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); - readl(base + OHCI_CONTROL); + /* disable interrupts */ + writel((u32) ~0, base + OHCI_INTRDISABLE); - /* Some NVIDIA controllers stop working if kept in RESET for too long */ - if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { - u32 fminterval; - int cnt; + /* Reset the USB bus, if the controller isn't already in RESET */ + if (control & OHCI_HCFS) { + /* Go into RESET, preserving RWC (and possibly IR) */ + writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); + readl(base + OHCI_CONTROL); - /* drive reset for at least 50 ms (7.1.7.5) */ + /* drive bus reset for at least 50 ms (7.1.7.5) */ msleep(50); + } - /* software reset of the controller, preserving HcFmInterval */ - fminterval = readl(base + OHCI_FMINTERVAL); - writel(OHCI_HCR, base + OHCI_CMDSTATUS); - - /* reset requires max 10 us delay */ - for (cnt = 30; cnt > 0; --cnt) { /* ... 
allow extra time */ - if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) - break; - udelay(1); - } - writel(fminterval, base + OHCI_FMINTERVAL); + /* software reset of the controller, preserving HcFmInterval */ + fminterval = readl(base + OHCI_FMINTERVAL); + writel(OHCI_HCR, base + OHCI_CMDSTATUS); - /* Now we're in the SUSPEND state with all devices reset - * and wakeups and interrupts disabled - */ + /* reset requires max 10 us delay */ + for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ + if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) + break; + udelay(1); } + writel(fminterval, base + OHCI_FMINTERVAL); - /* - * disable interrupts - */ - writel(~(u32)0, base + OHCI_INTRDISABLE); - writel(~(u32)0, base + OHCI_INTRSTATUS); - + /* Now the controller is safely in SUSPEND and nothing can wake it up */ iounmap(base); } @@ -547,7 +543,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = { /* Pegatron Lucid (Ordissimo AIRIS) */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "M11JB"), - DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* Pegatron Lucid (Ordissimo) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), }, }, { } @@ -717,12 +720,30 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, return -ETIMEDOUT; } -bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 + +bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) { return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI; } + +/* The Intel Lynx Point chipset also has switchable ports. */ +bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && + pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); +} + +bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) +{ + return usb_is_intel_ppt_switchable_xhci(pdev) || + usb_is_intel_lpt_switchable_xhci(pdev); +} EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); /* @@ -745,12 +766,21 @@ EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); */ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) { +#if defined(CONFIG_USB_XHCI_HCD) || defined(CONFIG_USB_XHCI_HCD_MODULE) u32 ports_available; - ports_available = 0xffffffff; + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register + * Indicate the ports that can be changed from OS. + */ + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n", + ports_available); + /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable - * Register, to turn on SuperSpeed terminations for all - * available ports. + * Register, to turn on SuperSpeed terminations for the + * switchable ports. */ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, cpu_to_le32(ports_available)); @@ -760,7 +790,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled " "under xHCI: 0x%x\n", ports_available); - ports_available = 0xffffffff; + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register + * Indicate the USB 2.0 ports to be controlled by the xHCI host. 
+ */ + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n", + ports_available); + /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to * switch the USB 2.0 power and data lines over to the xHCI * host. @@ -772,9 +811,28 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) &ports_available); dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over " "to xHCI: 0x%x\n", ports_available); +#else + /* Don't switchover the ports if the user hasn't compiled the xHCI + * driver. Otherwise they will see "dead" USB ports that don't power + * the devices. + */ + dev_warn(&xhci_pdev->dev, + "CONFIG_USB_XHCI_HCD is turned off, " + "defaulting to EHCI.\n"); + dev_warn(&xhci_pdev->dev, + "USB 3.0 devices will work at USB 2.0 speeds.\n"); +#endif /* CONFIG_USB_XHCI_HCD || CONFIG_USB_XHCI_HCD_MODULE */ + } EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); + /** * PCI Quirks for xHCI. * @@ -790,12 +848,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) void __iomem *op_reg_base; u32 val; int timeout; + int len = pci_resource_len(pdev, 0); if (!mmio_resource_enabled(pdev, 0)) return; - base = ioremap_nocache(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + base = ioremap_nocache(pci_resource_start(pdev, 0), len); if (base == NULL) return; @@ -805,9 +863,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) */ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); do { + if ((ext_cap_offset + sizeof(val)) > len) { + /* We're reading garbage from the controller */ + dev_warn(&pdev->dev, + "xHCI controller failing to respond"); + return; + } + if (!ext_cap_offset) /* We've reached the end of the extended capabilities */ goto hc_init; + val = readl(base + ext_cap_offset); if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) break; @@ -830,13 +896,18 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) } } - /* Disable any BIOS SMIs */ - writel(XHCI_LEGACY_DISABLE_SMI, - base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + /* Mask off (turn off) any enabled SMIs */ + val &= XHCI_LEGACY_DISABLE_SMI; + /* Mask all SMI events bits, RW1C */ + val |= XHCI_LEGACY_SMI_EVENTS; + /* Disable any BIOS SMIs and clear all SMI events*/ + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); +hc_init: if (usb_is_intel_switchable_xhci(pdev)) usb_enable_xhci_ports(pdev); -hc_init: + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); /* Wait for the host controller to be ready before writing any @@ -872,6 +943,22 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) { + /* Skip Netlogic mips SoC's internal PCI USB controller. 
+ * This device does not need/support EHCI/OHCI handoff + */ + if (pdev->vendor == 0x184e) /* vendor Netlogic */ + return; + if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI && + pdev->class != PCI_CLASS_SERIAL_USB_OHCI && + pdev->class != PCI_CLASS_SERIAL_USB_EHCI && + pdev->class != PCI_CLASS_SERIAL_USB_XHCI) + return; + + if (pci_enable_device(pdev) < 0) { + dev_warn(&pdev->dev, "Can't enable PCI device, " + "BIOS handoff failed.\n"); + return; + } if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) quirk_usb_handoff_uhci(pdev); else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) @@ -880,5 +967,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) quirk_usb_disable_ehci(pdev); else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) quirk_usb_handoff_xhci(pdev); + pci_disable_device(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b1002a8ef96..7f69a39163c 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -10,10 +10,12 @@ void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); #else static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} static inline void usb_amd_dev_put(void) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} #endif /* CONFIG_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index fba99b12058..18cd76b779a 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -446,6 +446,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) return IRQ_NONE; uhci_writew(uhci, status, USBSTS); /* Clear it */ + spin_lock(&uhci->lock); + if (unlikely(!uhci->is_initialized)) /* not yet configured */ + goto done; + if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { if (status & USBSTS_HSE) dev_err(uhci_dev(uhci), "host system error, " @@ -454,7 +458,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) dev_err(uhci_dev(uhci), "host controller process " "error, something bad happened!\n"); if (status & USBSTS_HCH) { - spin_lock(&uhci->lock); if (uhci->rh_state >= UHCI_RH_RUNNING) { dev_err(uhci_dev(uhci), "host controller halted, " @@ -472,15 +475,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd) * pending unlinks */ mod_timer(&hcd->rh_timer, jiffies); } - spin_unlock(&uhci->lock); } } - if (status & USBSTS_RD) + if (status & USBSTS_RD) { + spin_unlock(&uhci->lock); usb_hcd_poll_rh_status(hcd); - else { - spin_lock(&uhci->lock); + } else { uhci_scan_schedule(uhci); + done: spin_unlock(&uhci->lock); } @@ -658,9 +661,9 @@ static int uhci_start(struct usb_hcd *hcd) */ mb(); + spin_lock_irq(&uhci->lock); configure_hc(uhci); uhci->is_initialized = 1; - spin_lock_irq(&uhci->lock); start_rh(uhci); spin_unlock_irq(&uhci->lock); return 0; diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 045cde4cbc3..850723fb678 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -221,7 +221,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* auto-stop if nothing connected for 1 second */ if (any_ports_active(uhci)) uhci->rh_state = UHCI_RH_RUNNING; - else if (time_after_eq(jiffies, uhci->auto_stop_time)) + else if (time_after_eq(jiffies, 
uhci->auto_stop_time) && + !uhci->wait_for_hp) suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); break; diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 84ed28b34f9..82539913ad8 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, if (usb_pipein(urb->pipe)) status |= TD_CTRL_SPD; - i = urb->num_sgs; + i = urb->num_mapped_sgs; if (len > 0 && i > 0) { sg = urb->sg; data = sg_dma_address(sg); diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index a403b53e86b..76083ae9213 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u remaining = urb->transfer_buffer_length; - for_each_sg(urb->sg, sg, urb->num_sgs, i) { + for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { dma_addr_t dma_addr; size_t dma_remaining; dma_addr_t sp, ep; @@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, remaining = urb->transfer_buffer_length; - for_each_sg(urb->sg, sg, urb->num_sgs, i) { + for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { size_t len; size_t sg_remaining; void *orig; diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index ce5c9e51748..4206f6bef6f 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -62,8 +62,9 @@ /* USB Legacy Support Control and Status Register - section 7.1.2 */ /* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ #define XHCI_LEGACY_CONTROL_OFFSET (0x04) -/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */ -#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17)) +/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */ +#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17)) +#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29) /* command register values to disable interrupts and halt the HC */ /* start/stop HC execution - do not write unless HC is halted*/ diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index ce9f974dac0..7520ebb4454 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -75,7 +75,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, */ memset(port_removable, 0, sizeof(port_removable)); for (i = 0; i < ports; i++) { - portsc = xhci_readl(xhci, xhci->usb3_ports[i]); + portsc = xhci_readl(xhci, xhci->usb2_ports[i]); /* If a device is removable, PORTSC reports a 0, same as in the * hub descriptor DeviceRemovable bits. 
*/ diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index ffeee575fd2..af65322e18c 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -180,8 +180,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, struct xhci_segment *next; next = xhci_segment_alloc(xhci, flags); - if (!next) + if (!next) { + prev = ring->first_seg; + while (prev) { + next = prev->next; + xhci_segment_free(xhci, prev); + prev = next; + } goto fail; + } xhci_link_segments(xhci, prev, next, link_trbs, isoc); prev = next; @@ -201,7 +208,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, return ring; fail: - xhci_ring_free(xhci, ring); + kfree(ring); return NULL; } @@ -1002,26 +1009,44 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, } /* - * Convert bInterval expressed in frames (in 1-255 range) to exponent of + * Convert bInterval expressed in microframes (in 1-255 range) to exponent of * microframes, rounded down to nearest power of 2. */ -static unsigned int xhci_parse_frame_interval(struct usb_device *udev, - struct usb_host_endpoint *ep) +static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, + struct usb_host_endpoint *ep, unsigned int desc_interval, + unsigned int min_exponent, unsigned int max_exponent) { unsigned int interval; - interval = fls(8 * ep->desc.bInterval) - 1; - interval = clamp_val(interval, 3, 10); - if ((1 << interval) != 8 * ep->desc.bInterval) + interval = fls(desc_interval) - 1; + interval = clamp_val(interval, min_exponent, max_exponent); + if ((1 << interval) != desc_interval) dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", ep->desc.bEndpointAddress, 1 << interval, - 8 * ep->desc.bInterval); + desc_interval); return interval; } +static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + if (ep->desc.bInterval == 0) + return 0; + return xhci_microframes_to_exponent(udev, ep, + ep->desc.bInterval, 0, 15); +} + + +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + return xhci_microframes_to_exponent(udev, ep, + ep->desc.bInterval * 8, 3, 10); +} + /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". 
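For the endpoint-interval rework just above: the new xhci_microframes_to_exponent() helper rounds a requested interval, expressed in microframes, down to a power of two and clamps the exponent to the range the Interval field accepts (0..15 for the control/bulk NAK rate, 3..10 for intervals given in frames, which are first multiplied by 8). A small sketch of that rounding; interval_to_exponent() is an illustrative name, and it assumes a non-zero input, which the patch guards separately with its bInterval == 0 check:

#include <linux/bitops.h>	/* fls() */
#include <linux/kernel.h>	/* clamp_val() */

/*
 * Illustrative sketch of the rounding in xhci_microframes_to_exponent():
 * 2^exponent is the largest power of two not above the requested number
 * of microframes, clamped to what the Interval field can encode.
 */
static unsigned int interval_to_exponent(unsigned int microframes,
					 unsigned int min_exp,
					 unsigned int max_exp)
{
	unsigned int exp = fls(microframes) - 1;	/* floor(log2(n)) */

	return clamp_val(exp, min_exp, max_exp);
}

/*
 * Example: a full-speed interrupt endpoint with bInterval = 6 frames asks
 * for 48 microframes; fls(48) - 1 = 5, so the Interval field is programmed
 * to 5 (2^5 = 32 microframes) and the "rounding interval" warning fires.
 */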
If xHCI's Interval field @@ -1040,7 +1065,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_bulk(&ep->desc)) { - interval = ep->desc.bInterval; + interval = xhci_parse_microframe_interval(udev, ep); break; } /* Fall through - SS and HS isoc/int have same decoding */ @@ -1489,15 +1514,11 @@ void xhci_free_command(struct xhci_hcd *xhci, void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + struct xhci_cd *cur_cd, *next_cd; int size; int i; /* Free the Event Ring Segment Table and the actual Event Ring */ - if (xhci->ir_set) { - xhci_writel(xhci, 0, &xhci->ir_set->erst_size); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); - xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); - } size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); if (xhci->erst.entries) pci_free_consistent(pdev, size, @@ -1509,11 +1530,16 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->event_ring = NULL; xhci_dbg(xhci, "Freed event ring\n"); - xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); + xhci->cmd_ring_reserved_trbs = 0; if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; xhci_dbg(xhci, "Freed command ring\n"); + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + } for (i = 1; i < MAX_HC_SLOTS; ++i) xhci_free_virt_device(xhci, i); @@ -1538,7 +1564,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->medium_streams_pool = NULL; xhci_dbg(xhci, "Freed medium stream array pool\n"); - xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); if (xhci->dcbaa) pci_free_consistent(pdev, sizeof(*xhci->dcbaa), xhci->dcbaa, xhci->dcbaa->dma); @@ -2004,6 +2029,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags); if (!xhci->cmd_ring) goto fail; + INIT_LIST_HEAD(&xhci->cancel_cmd_list); xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); xhci_dbg(xhci, "First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); @@ -2107,6 +2133,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) fail: xhci_warn(xhci, "Couldn't initialize memory\n"); + xhci_halt(xhci); + xhci_reset(xhci); xhci_mem_cleanup(xhci); return -ENOMEM; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 50e7156a7d8..7998b6fc9bb 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,6 +28,7 @@ /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_ASROCK_P67 0x7023 @@ -109,8 +110,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd) /* Look for vendor-specific quirks */ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) { - if (pdev->revision == 0x0) { + (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK || + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) { + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && + pdev->revision == 0x0) { xhci->quirks |= XHCI_RESET_EP_QUIRK; xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" " endpoint cmd after reset endpoint\n"); @@ -123,6 +126,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd) xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u " 
"has broken MSI implementation\n", pdev->revision); + xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_NEC) @@ -139,12 +143,25 @@ static int xhci_pci_setup(struct usb_hcd *hcd) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; + /* + * PPT desktop boards DH77EB and DH77DF will power back on after + * a few seconds of being shutdown. The fix for this is to + * switch the ports from xHCI to EHCI on shutdown. We can't use + * DMI information to find those particular boards (since each + * vendor will change the board name), so we have to key off all + * PPT chipsets. + */ + xhci->quirks |= XHCI_SPURIOUS_REBOOT; + xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); + xhci->quirks |= XHCI_TRUST_TX_LENGTH; } + if (pdev->vendor == PCI_VENDOR_ID_VIA) + xhci->quirks |= XHCI_RESET_ON_RESUME; /* Make sure the HC is halted. */ retval = xhci_halt(xhci); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index b4b06910f68..151ca5e960c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -147,25 +147,34 @@ static void next_trb(struct xhci_hcd *xhci, */ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer) { - union xhci_trb *next = ++(ring->dequeue); unsigned long long addr; ring->deq_updates++; - /* Update the dequeue pointer further if that was a link TRB or we're at - * the end of an event ring segment (which doesn't have link TRBS) - */ - while (last_trb(xhci, ring, ring->deq_seg, next)) { - if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) { - ring->cycle_state = (ring->cycle_state ? 0 : 1); - if (!in_interrupt()) - xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n", - ring, - (unsigned int) ring->cycle_state); + + do { + /* + * Update the dequeue pointer further if that was a link TRB or + * we're at the end of an event ring segment (which doesn't have + * link TRBS) + */ + if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { + if (consumer && last_trb_on_last_seg(xhci, ring, + ring->deq_seg, ring->dequeue)) { + if (!in_interrupt()) + xhci_dbg(xhci, "Toggle cycle state " + "for ring %p = %i\n", + ring, + (unsigned int) + ring->cycle_state); + ring->cycle_state = (ring->cycle_state ? 
0 : 1); + } + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } else { + ring->dequeue++; } - ring->deq_seg = ring->deq_seg->next; - ring->dequeue = ring->deq_seg->trbs; - next = ring->dequeue; - } + } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); } @@ -302,12 +311,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, /* Ring the host controller doorbell after placing a command on the ring */ void xhci_ring_cmd_db(struct xhci_hcd *xhci) { + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) + return; + xhci_dbg(xhci, "// Ding dong!\n"); xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]); /* Flush PCI posted writes */ xhci_readl(xhci, &xhci->dba->doorbell[0]); } +static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) +{ + u64 temp_64; + int ret; + + xhci_dbg(xhci, "Abort command ring\n"); + + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) { + xhci_dbg(xhci, "The command ring isn't running, " + "Have the command ring been stopped?\n"); + return 0; + } + + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + if (!(temp_64 & CMD_RING_RUNNING)) { + xhci_dbg(xhci, "Command ring had been stopped\n"); + return 0; + } + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, + &xhci->op_regs->cmd_ring); + + /* Section 4.6.1.2 of xHCI 1.0 spec says software should + * time the completion od all xHCI commands, including + * the Command Abort operation. If software doesn't see + * CRR negated in a timely manner (e.g. longer than 5 + * seconds), then it should assume that the there are + * larger problems with the xHC and assert HCRST. + */ + ret = handshake(xhci, &xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); + if (ret < 0) { + xhci_err(xhci, "Stopped the command ring failed, " + "maybe the host is dead\n"); + xhci->xhc_state |= XHCI_STATE_DYING; + xhci_quiesce(xhci); + xhci_halt(xhci); + return -ESHUTDOWN; + } + + return 0; +} + +static int xhci_queue_cd(struct xhci_hcd *xhci, + struct xhci_command *command, + union xhci_trb *cmd_trb) +{ + struct xhci_cd *cd; + cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC); + if (!cd) + return -ENOMEM; + INIT_LIST_HEAD(&cd->cancel_cmd_list); + + cd->command = command; + cd->cmd_trb = cmd_trb; + list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list); + + return 0; +} + +/* + * Cancel the command which has issue. + * + * Some commands may hang due to waiting for acknowledgement from + * usb device. It is outside of the xHC's ability to control and + * will cause the command ring is blocked. When it occurs software + * should intervene to recover the command ring. 
+ * See Section 4.6.1.1 and 4.6.1.2 + */ +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, + union xhci_trb *cmd_trb) +{ + int retval = 0; + unsigned long flags; + + spin_lock_irqsave(&xhci->lock, flags); + + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_warn(xhci, "Abort the command ring," + " but the xHCI is dead.\n"); + retval = -ESHUTDOWN; + goto fail; + } + + /* queue the cmd desriptor to cancel_cmd_list */ + retval = xhci_queue_cd(xhci, command, cmd_trb); + if (retval) { + xhci_warn(xhci, "Queuing command descriptor failed.\n"); + goto fail; + } + + /* abort command ring */ + retval = xhci_abort_cmd_ring(xhci); + if (retval) { + xhci_err(xhci, "Abort command ring failed\n"); + if (unlikely(retval == -ESHUTDOWN)) { + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); + xhci_dbg(xhci, "xHCI host controller is dead.\n"); + return retval; + } + } + +fail: + spin_unlock_irqrestore(&xhci->lock, flags); + return retval; +} + void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, @@ -1037,6 +1157,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, } } +/* Complete the command and detele it from the devcie's command queue. + */ +static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, + struct xhci_command *command, u32 status) +{ + command->status = status; + list_del(&command->cmd_list); + if (command->completion) + complete(command->completion); + else + xhci_free_command(xhci, command); +} + + /* Check to see if a command in the device's command queue matches this one. * Signal the completion or free the command, and return 1. Return 0 if the * completed command isn't at the head of the command list. @@ -1055,15 +1189,155 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, if (xhci->cmd_ring->dequeue != command->command_trb) return 0; - command->status = GET_COMP_CODE(le32_to_cpu(event->status)); - list_del(&command->cmd_list); - if (command->completion) - complete(command->completion); - else - xhci_free_command(xhci, command); + xhci_complete_cmd_in_cmd_wait_list(xhci, command, + GET_COMP_CODE(le32_to_cpu(event->status))); return 1; } +/* + * Finding the command trb need to be cancelled and modifying it to + * NO OP command. And if the command is in device's command wait + * list, finishing and freeing it. + * + * If we can't find the command trb, we think it had already been + * executed. 
+ */ +static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd) +{ + struct xhci_segment *cur_seg; + union xhci_trb *cmd_trb; + u32 cycle_state; + + if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) + return; + + /* find the current segment of command ring */ + cur_seg = find_trb_seg(xhci->cmd_ring->first_seg, + xhci->cmd_ring->dequeue, &cycle_state); + + if (!cur_seg) { + xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n", + xhci->cmd_ring->dequeue, + (unsigned long long) + xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue)); + xhci_debug_ring(xhci, xhci->cmd_ring); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + return; + } + + /* find the command trb matched by cd from command ring */ + for (cmd_trb = xhci->cmd_ring->dequeue; + cmd_trb != xhci->cmd_ring->enqueue; + next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) { + /* If the trb is link trb, continue */ + if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3])) + continue; + + if (cur_cd->cmd_trb == cmd_trb) { + + /* If the command in device's command list, we should + * finish it and free the command structure. + */ + if (cur_cd->command) + xhci_complete_cmd_in_cmd_wait_list(xhci, + cur_cd->command, COMP_CMD_STOP); + + /* get cycle state from the origin command trb */ + cycle_state = le32_to_cpu(cmd_trb->generic.field[3]) + & TRB_CYCLE; + + /* modify the command trb to NO OP command */ + cmd_trb->generic.field[0] = 0; + cmd_trb->generic.field[1] = 0; + cmd_trb->generic.field[2] = 0; + cmd_trb->generic.field[3] = cpu_to_le32( + TRB_TYPE(TRB_CMD_NOOP) | cycle_state); + break; + } + } +} + +static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci) +{ + struct xhci_cd *cur_cd, *next_cd; + + if (list_empty(&xhci->cancel_cmd_list)) + return; + + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + xhci_cmd_to_noop(xhci, cur_cd); + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + } +} + +/* + * traversing the cancel_cmd_list. If the command descriptor according + * to cmd_trb is found, the function free it and return 1, otherwise + * return 0. + */ +static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci, + union xhci_trb *cmd_trb) +{ + struct xhci_cd *cur_cd, *next_cd; + + if (list_empty(&xhci->cancel_cmd_list)) + return 0; + + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + if (cur_cd->cmd_trb == cmd_trb) { + if (cur_cd->command) + xhci_complete_cmd_in_cmd_wait_list(xhci, + cur_cd->command, COMP_CMD_STOP); + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + return 1; + } + } + + return 0; +} + +/* + * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the + * trb pointed by the command ring dequeue pointer is the trb we want to + * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will + * traverse the cancel_cmd_list to trun the all of the commands according + * to command descriptor to NO-OP trb. + */ +static int handle_stopped_cmd_ring(struct xhci_hcd *xhci, + int cmd_trb_comp_code) +{ + int cur_trb_is_good = 0; + + /* Searching the cmd trb pointed by the command ring dequeue + * pointer in command descriptor list. If it is found, free it. 
+ */ + cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci, + xhci->cmd_ring->dequeue); + + if (cmd_trb_comp_code == COMP_CMD_ABORT) + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + else if (cmd_trb_comp_code == COMP_CMD_STOP) { + /* traversing the cancel_cmd_list and canceling + * the command according to command descriptor + */ + xhci_cancel_cmd_in_cd_list(xhci); + + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; + /* + * ring command ring doorbell again to restart the + * command ring + */ + if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) + xhci_ring_cmd_db(xhci); + } + return cur_trb_is_good; +} + static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { @@ -1089,6 +1363,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci->error_bitmask |= 1 << 5; return; } + + if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) || + (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) { + /* If the return value is 0, we think the trb pointed by + * command ring dequeue pointer is a good trb. The good + * trb means we don't want to cancel the trb, but it have + * been stopped by host. So we should handle it normally. + * Otherwise, driver should invoke inc_deq() and return. + */ + if (handle_stopped_cmd_ring(xhci, + GET_COMP_CODE(le32_to_cpu(event->status)))) { + inc_deq(xhci, xhci->cmd_ring, false); + return; + } + } + switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) & TRB_TYPE_BITMASK) { case TRB_TYPE(TRB_ENABLE_SLOT): @@ -1218,6 +1508,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci, * * Returns a zero-based port number, which is suitable for indexing into each of * the split roothubs' port arrays and bus state arrays. + * Add one to it in order to call xhci_find_slot_id_by_port. */ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd, struct xhci_hcd *xhci, u32 port_id) @@ -1340,7 +1631,7 @@ static void handle_port_status(struct xhci_hcd *xhci, temp |= PORT_LINK_STROBE | XDEV_U0; xhci_writel(xhci, temp, port_array[faked_port_index]); slot_id = xhci_find_slot_id_by_port(hcd, xhci, - faked_port_index); + faked_port_index + 1); if (!slot_id) { xhci_dbg(xhci, "slot_id is zero\n"); goto cleanup; @@ -1669,8 +1960,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, if (event_trb != ep_ring->dequeue && event_trb != td->last_trb) td->urb->actual_length = - td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + td->urb->transfer_buffer_length - + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); else td->urb->actual_length = 0; @@ -1702,7 +1993,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, /* Maybe the event was for the data stage? */ td->urb->actual_length = td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); xhci_dbg(xhci, "Waiting for status " "stage event\n"); return 0; @@ -1738,8 +2029,12 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: - frame->status = 0; - break; + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { + frame->status = 0; + break; + } + if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) + trb_comp_code = COMP_SHORT_TX; case COMP_SHORT_TX: frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
-EREMOTEIO : 0; @@ -1755,6 +2050,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, break; case COMP_DEV_ERR: case COMP_STALL: + case COMP_TX_ERR: frame->status = -EPROTO; skip_td = true; break; @@ -1780,7 +2076,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); } len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); if (trb_comp_code != COMP_STOP_INVAL) { frame->actual_length = len; @@ -1837,13 +2133,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, switch (trb_comp_code) { case COMP_SUCCESS: /* Double check that the HW transferred everything. */ - if (event_trb != td->last_trb) { + if (event_trb != td->last_trb || + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { xhci_warn(xhci, "WARN Successful completion " "on short TX\n"); if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; else *status = 0; + if ((xhci->quirks & XHCI_TRUST_TX_LENGTH)) + trb_comp_code = COMP_SHORT_TX; } else { *status = 0; } @@ -1863,18 +2162,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, "%d bytes untransferred\n", td->urb->ep->desc.bEndpointAddress, td->urb->transfer_buffer_length, - TRB_LEN(le32_to_cpu(event->transfer_len))); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); /* Fast path - was this the last TRB in the TD for this URB? */ if (event_trb == td->last_trb) { - if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); if (td->urb->transfer_buffer_length < td->urb->actual_length) { xhci_warn(xhci, "HC gave bad length " "of %d bytes left\n", - TRB_LEN(le32_to_cpu(event->transfer_len))); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); td->urb->actual_length = 0; if (td->urb->transfer_flags & URB_SHORT_NOT_OK) *status = -EREMOTEIO; @@ -1918,7 +2217,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, if (trb_comp_code != COMP_STOP_INVAL) td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - - TRB_LEN(le32_to_cpu(event->transfer_len)); + EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); } return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -1984,6 +2283,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + break; + if (xhci->quirks & XHCI_TRUST_TX_LENGTH) + trb_comp_code = COMP_SHORT_TX; + else + xhci_warn(xhci, "WARN Successful completion on short TX: " + "needs XHCI_TRUST_TX_LENGTH quirk?\n"); case COMP_SHORT_TX: break; case COMP_STOP: @@ -2200,6 +2506,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, (trb_comp_code != COMP_STALL && trb_comp_code != COMP_BABBLE)) xhci_urb_free_priv(xhci, urb_priv); + else + kfree(urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -2351,7 +2659,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) u32 irq_pending; /* Acknowledge the PCI interrupt */ irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); - irq_pending |= 0x3; + irq_pending |= IMAN_IP; xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending); } @@ -2570,7 
+2878,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) struct scatterlist *sg; sg = NULL; - num_sgs = urb->num_sgs; + num_sgs = urb->num_mapped_sgs; temp = urb->transfer_buffer_length; xhci_dbg(xhci, "count sg list trbs: \n"); @@ -2754,7 +3062,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return -EINVAL; num_trbs = count_sg_trbs_needed(xhci, urb); - num_sgs = urb->num_sgs; + num_sgs = urb->num_mapped_sgs; total_packet_count = roundup(urb->transfer_buffer_length, le16_to_cpu(urb->ep->desc.wMaxPacketSize)); @@ -3307,9 +3615,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, td = urb_priv->td[i]; for (j = 0; j < trbs_per_td; j++) { u32 remainder = 0; - field = TRB_TBC(burst_count) | TRB_TLBPC(residue); + field = 0; if (first_trb) { + field = TRB_TBC(burst_count) | + TRB_TLBPC(residue); /* Queue the isoc TRB */ field |= TRB_TYPE(TRB_ISOC); /* Assume URB_ISO_ASAP is set */ @@ -3340,7 +3650,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } else { td->last_trb = ep_ring->enqueue; field |= TRB_IOC; - if (xhci->hci_version == 0x100) { + if (xhci->hci_version == 0x100 && + !(xhci->quirks & + XHCI_AVOID_BEI)) { /* Set BEI bit except for the last td */ if (i < num_tds - 1) field |= TRB_BEI; @@ -3381,7 +3693,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check TD length */ if (running_total != td_len) { xhci_err(xhci, "ISOC TD length unmatch\n"); - return -EINVAL; + ret = -EINVAL; + goto cleanup; } } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 221f14e1fdd..4864b252478 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -51,7 +51,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). */ -static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, +int handshake(struct xhci_hcd *xhci, void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; @@ -104,8 +104,10 @@ int xhci_halt(struct xhci_hcd *xhci) ret = handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); - if (!ret) + if (!ret) { xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + } return ret; } @@ -163,7 +165,7 @@ int xhci_reset(struct xhci_hcd *xhci) xhci_writel(xhci, command, &xhci->op_regs->command); ret = handshake(xhci, &xhci->op_regs->command, - CMD_RESET, 0, 250 * 1000); + CMD_RESET, 0, 10 * 1000 * 1000); if (ret) return ret; @@ -172,7 +174,8 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
*/ - return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); + return handshake(xhci, &xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); } /* @@ -389,6 +392,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci) return -ENODEV; } xhci->shared_hcd->state = HC_STATE_RUNNING; + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); @@ -444,6 +448,11 @@ int xhci_run(struct usb_hcd *hcd) if (ret) { legacy_irq: + if (!pdev->irq) { + xhci_err(xhci, "No msi-x/msi found and " + "no IRQ in BIOS\n"); + return -EINVAL; + } /* fall back to legacy interrupt*/ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, hcd->irq_descr, hcd); @@ -588,6 +597,9 @@ void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + if (xhci->quirks & XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); + spin_lock_irq(&xhci->lock); xhci_halt(xhci); spin_unlock_irq(&xhci->lock); @@ -605,11 +617,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci) xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification); xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg); - xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control); xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size); xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base); xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending); + xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control); } static void xhci_restore_registers(struct xhci_hcd *xhci) @@ -618,10 +630,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification); xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg); - xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending); - xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control); xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size); xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); + xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); + xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending); + xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control); } static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) @@ -710,7 +723,7 @@ int xhci_suspend(struct xhci_hcd *xhci) command &= ~CMD_RUN; xhci_writel(xhci, command, &xhci->op_regs->command); if (handshake(xhci, &xhci->op_regs->status, - STS_HALT, STS_HALT, 100*100)) { + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) { xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; @@ -724,8 +737,8 @@ int xhci_suspend(struct xhci_hcd *xhci) command = xhci_readl(xhci, &xhci->op_regs->command); command |= CMD_CSS; xhci_writel(xhci, command, &xhci->op_regs->command); - if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { - xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); + if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) { + xhci_warn(xhci, "WARN: xHC save state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } @@ -780,8 +793,8 @@ int xhci_resume(struct xhci_hcd *xhci, 
bool hibernated) command |= CMD_CRS; xhci_writel(xhci, command, &xhci->op_regs->command); if (handshake(xhci, &xhci->op_regs->status, - STS_RESTORE, 0, 10*100)) { - xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); + STS_RESTORE, 0, 10 * 1000)) { + xhci_warn(xhci, "WARN: xHC restore state timeout\n"); spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } @@ -1568,6 +1581,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, /* FIXME: can we allocate more resources for the HC? */ break; case COMP_BW_ERR: + case COMP_2ND_BW_ERR: dev_warn(&udev->dev, "Not enough bandwidth " "for new device state.\n"); ret = -ENOSPC; @@ -1764,6 +1778,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, struct completion *cmd_completion; u32 *cmd_status; struct xhci_virt_device *virt_dev; + union xhci_trb *cmd_trb; spin_lock_irqsave(&xhci->lock, flags); virt_dev = xhci->devs[udev->slot_id]; @@ -1806,6 +1821,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, } init_completion(cmd_completion); + cmd_trb = xhci->cmd_ring->dequeue; if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, udev->slot_id, must_succeed); @@ -1827,14 +1843,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, /* Wait for the configure endpoint command to complete */ timeleft = wait_for_completion_interruptible_timeout( cmd_completion, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for %s command\n", timeleft == 0 ? "Timeout" : "Signal", ctx_change == 0 ? "configure endpoint" : "evaluate context"); - /* FIXME cancel the configure endpoint command */ + /* cancel the configure endpoint command */ + ret = xhci_cancel_cmd(xhci, command, cmd_trb); + if (ret < 0) + return ret; return -ETIME; } @@ -2183,8 +2202,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, if (ret < 0) return ret; - max_streams = USB_SS_MAX_STREAMS( - eps[i]->ss_ep_comp.bmAttributes); + max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); if (max_streams < (*num_streams - 1)) { xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", eps[i]->desc.bEndpointAddress, @@ -2768,8 +2786,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) unsigned long flags; int timeleft; int ret; + union xhci_trb *cmd_trb; spin_lock_irqsave(&xhci->lock, flags); + cmd_trb = xhci->cmd_ring->dequeue; ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); @@ -2781,12 +2801,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) /* XXX: how much time for xHC slot assignment? */ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? 
"Timeout" : "Signal"); - /* FIXME cancel the enable slot request */ - return 0; + /* cancel the enable slot request */ + return xhci_cancel_cmd(xhci, NULL, cmd_trb); } if (!xhci->slot_id) { @@ -2847,6 +2867,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; + union xhci_trb *cmd_trb; if (!udev->slot_id) { xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); @@ -2885,6 +2906,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); spin_lock_irqsave(&xhci->lock, flags); + cmd_trb = xhci->cmd_ring->dequeue; ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id); if (ret) { @@ -2897,7 +2919,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); /* FIXME: From section 4.3.4: "Software shall be responsible for timing * the SetAddress() "recovery interval" required by USB and aborting the * command on a timeout. @@ -2905,7 +2927,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? "Timeout" : "Signal"); - /* FIXME cancel the address device command */ + /* cancel the address device command */ + ret = xhci_cancel_cmd(xhci, NULL, cmd_trb); + if (ret < 0) + return ret; return -ETIME; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 49ce76c6b41..94724b051e3 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -205,6 +205,10 @@ struct xhci_op_regs { #define CMD_PM_INDEX (1 << 11) /* bits 12:31 are reserved (and should be preserved on writes). */ +/* IMAN - Interrupt Management Register */ +#define IMAN_IE (1 << 1) +#define IMAN_IP (1 << 0) + /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. */ #define STS_HALT XHCI_STS_HALT @@ -827,6 +831,10 @@ struct xhci_transfer_event { __le32 flags; }; +/* Transfer event TRB length bit mask */ +/* bits 0:23 */ +#define EVENT_TRB_LEN(p) ((p) & 0xffffff) + /** Transfer Event bit fields **/ #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) @@ -900,7 +908,6 @@ struct xhci_transfer_event { /* Invalid Stream ID Error */ #define COMP_STRID_ERR 34 /* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */ -/* FIXME - check for this */ #define COMP_2ND_BW_ERR 35 /* Split Transaction Error */ #define COMP_SPLIT_ERR 36 @@ -1067,6 +1074,9 @@ union xhci_trb { #define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) + /* Nec vendor-specific command completion event. */ #define TRB_NEC_CMD_COMP 48 /* Get NEC firmware revision. 
*/ @@ -1108,6 +1118,16 @@ struct xhci_td { union xhci_trb *last_trb; }; +/* xHCI command default timeout value */ +#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ) + +/* command descriptor */ +struct xhci_cd { + struct list_head cancel_cmd_list; + struct xhci_command *command; + union xhci_trb *cmd_trb; +}; + struct xhci_dequeue_state { struct xhci_segment *new_deq_seg; union xhci_trb *new_deq_ptr; @@ -1249,6 +1269,11 @@ struct xhci_hcd { /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; +#define CMD_RING_STATE_RUNNING (1 << 0) +#define CMD_RING_STATE_ABORTED (1 << 1) +#define CMD_RING_STATE_STOPPED (1 << 2) + struct list_head cancel_cmd_list; unsigned int cmd_ring_reserved_trbs; struct xhci_ring *event_ring; struct xhci_erst erst; @@ -1312,6 +1337,9 @@ struct xhci_hcd { #define XHCI_BROKEN_MSI (1 << 6) #define XHCI_RESET_ON_RESUME (1 << 7) #define XHCI_AMD_0x96_HOST (1 << 9) +#define XHCI_TRUST_TX_LENGTH (1 << 10) +#define XHCI_SPURIOUS_REBOOT (1 << 13) +#define XHCI_AVOID_BEI (1 << 15) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1480,6 +1508,8 @@ void xhci_unregister_pci(void); #endif /* xHCI host controller glue */ +int handshake(struct xhci_hcd *xhci, void __iomem *ptr, + u32 mask, u32 done, int usec); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); @@ -1562,6 +1592,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); void xhci_stop_endpoint_command_watchdog(unsigned long arg); +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, + union xhci_trb *cmd_trb); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 68ab460a735..0a70c9836df 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, { APPLEDISPLAY_DEVICE(0x921d) }, + { APPLEDISPLAY_DEVICE(0x9236) }, /* Terminating entry */ { } diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index fc15ad4c313..723e833b274 100644 --- a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -259,7 +259,7 @@ static int emi62_load_firmware (struct usb_device *dev) return err; } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c index fe1d44319d0..8f725f65191 100644 --- a/drivers/usb/misc/isight_firmware.c +++ b/drivers/usb/misc/isight_firmware.c @@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf, ptr = firmware->data; + buf[0] = 0x01; if (usb_control_msg - (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1, + (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1, 300) != 1) { printk(KERN_ERR "Failed to initialise isight firmware loader\n"); @@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf, } } + buf[0] = 0x00; if (usb_control_msg - (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, 
"\0", 1, + (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1, 300) != 1) { printk(KERN_ERR "isight firmware loading completion failed\n"); ret = -ENODEV; diff --git a/drivers/usb/misc/mdm_ctrl_bridge.c b/drivers/usb/misc/mdm_ctrl_bridge.c new file mode 100644 index 00000000000..87adf2e92cb --- /dev/null +++ b/drivers/usb/misc/mdm_ctrl_bridge.c @@ -0,0 +1,729 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char *ctrl_bridge_names[] = { + "dun_ctrl_hsic0", + "rmnet_ctrl_hsic0" +}; + +/* polling interval for Interrupt ep */ +#define HS_INTERVAL 7 +#define FS_LS_INTERVAL 3 + +#define ACM_CTRL_DTR (1 << 0) +#define DEFAULT_READ_URB_LENGTH 4096 + +struct ctrl_bridge { + + struct usb_device *udev; + struct usb_interface *intf; + + unsigned int int_pipe; + struct urb *inturb; + void *intbuf; + + struct urb *readurb; + void *readbuf; + + struct usb_anchor tx_submitted; + struct usb_ctrlrequest *in_ctlreq; + + struct bridge *brdg; + struct platform_device *pdev; + + /* input control lines (DSR, CTS, CD, RI) */ + unsigned int cbits_tohost; + + /* output control lines (DTR, RTS) */ + unsigned int cbits_tomdm; + + /* counters */ + unsigned int snd_encap_cmd; + unsigned int get_encap_res; + unsigned int resp_avail; + unsigned int set_ctrl_line_sts; + unsigned int notify_ser_state; + +}; + +static struct ctrl_bridge *__dev[MAX_BRIDGE_DEVICES]; + +/* counter used for indexing ctrl bridge devices */ +static int ch_id; + +unsigned int ctrl_bridge_get_cbits_tohost(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + return dev->cbits_tohost; +} +EXPORT_SYMBOL(ctrl_bridge_get_cbits_tohost); + +int ctrl_bridge_set_cbits(unsigned int id, unsigned int cbits) +{ + struct ctrl_bridge *dev; + struct bridge *brdg; + int retval; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + pr_debug("%s: dev[id] =%u cbits : %u\n", __func__, id, cbits); + + brdg = dev->brdg; + if (!brdg) + return -ENODEV; + + dev->cbits_tomdm = cbits; + + retval = ctrl_bridge_write(id, NULL, 0); + + /* if DTR is high, update latest modem info to host */ + if (brdg && (cbits & ACM_CTRL_DTR) && brdg->ops.send_cbits) + brdg->ops.send_cbits(brdg->ctx, dev->cbits_tohost); + + return retval; +} +EXPORT_SYMBOL(ctrl_bridge_set_cbits); + +static void resp_avail_cb(struct urb *urb) +{ + struct ctrl_bridge *dev = urb->context; + struct usb_device *udev; + int status = 0; + int resubmit_urb = 1; + struct bridge *brdg = dev->brdg; + + udev = interface_to_usbdev(dev->intf); + switch (urb->status) { + case 0: + /*success*/ + dev->get_encap_res++; + if (brdg && brdg->ops.send_pkt) + brdg->ops.send_pkt(brdg->ctx, urb->transfer_buffer, + urb->actual_length); + break; + + /*do not resubmit*/ + case -ESHUTDOWN: + case -ENOENT: + case -ECONNRESET: + /* 
unplug */ + case -EPROTO: + /*babble error*/ + resubmit_urb = 0; + /*resubmit*/ + case -EOVERFLOW: + default: + dev_dbg(&udev->dev, "%s: non zero urb status = %d\n", + __func__, urb->status); + } + + if (resubmit_urb) { + /*re- submit int urb to check response available*/ + status = usb_submit_urb(dev->inturb, GFP_ATOMIC); + if (status) + dev_err(&udev->dev, + "%s: Error re-submitting Int URB %d\n", + __func__, status); + } +} + +static void notification_available_cb(struct urb *urb) +{ + int status; + struct usb_cdc_notification *ctrl; + struct usb_device *udev; + struct ctrl_bridge *dev = urb->context; + struct bridge *brdg = dev->brdg; + unsigned int ctrl_bits; + unsigned char *data; + + udev = interface_to_usbdev(dev->intf); + + switch (urb->status) { + case 0: + /*success*/ + break; + case -ESHUTDOWN: + case -ENOENT: + case -ECONNRESET: + case -EPROTO: + /* unplug */ + return; + case -EPIPE: + dev_err(&udev->dev, "%s: stall on int endpoint\n", __func__); + /* TBD : halt to be cleared in work */ + case -EOVERFLOW: + default: + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + goto resubmit_int_urb; + } + + ctrl = (struct usb_cdc_notification *)urb->transfer_buffer; + data = (unsigned char *)(ctrl + 1); + + switch (ctrl->bNotificationType) { + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: + dev->resp_avail++; + usb_fill_control_urb(dev->readurb, udev, + usb_rcvctrlpipe(udev, 0), + (unsigned char *)dev->in_ctlreq, + dev->readbuf, + DEFAULT_READ_URB_LENGTH, + resp_avail_cb, dev); + + status = usb_submit_urb(dev->readurb, GFP_ATOMIC); + if (status) { + dev_err(&udev->dev, + "%s: Error submitting Read URB %d\n", + __func__, status); + goto resubmit_int_urb; + } + return; + case USB_CDC_NOTIFY_NETWORK_CONNECTION: + dev_dbg(&udev->dev, "%s network\n", ctrl->wValue ? 
+ "connected to" : "disconnected from"); + break; + case USB_CDC_NOTIFY_SERIAL_STATE: + dev->notify_ser_state++; + ctrl_bits = get_unaligned_le16(data); + dev_dbg(&udev->dev, "serial state: %d\n", ctrl_bits); + dev->cbits_tohost = ctrl_bits; + if (brdg && brdg->ops.send_cbits) + brdg->ops.send_cbits(brdg->ctx, ctrl_bits); + break; + default: + dev_err(&udev->dev, "%s: unknown notification %d received:" + "index %d len %d data0 %d data1 %d", + __func__, ctrl->bNotificationType, ctrl->wIndex, + ctrl->wLength, data[0], data[1]); + } + +resubmit_int_urb: + status = usb_submit_urb(urb, GFP_ATOMIC); + if (status) + dev_err(&udev->dev, "%s: Error re-submitting Int URB %d\n", + __func__, status); +} + +int ctrl_bridge_start_read(struct ctrl_bridge *dev) +{ + int retval = 0; + struct usb_device *udev; + + udev = interface_to_usbdev(dev->intf); + + retval = usb_autopm_get_interface_async(dev->intf); + if (retval < 0) { + dev_err(&udev->dev, "%s resumption fail\n", __func__); + goto done_nopm; + } + + retval = usb_submit_urb(dev->inturb, GFP_KERNEL); + if (retval < 0) + dev_err(&udev->dev, "%s intr submit %d\n", __func__, retval); + + usb_autopm_put_interface_async(dev->intf); +done_nopm: + return retval; +} + +static int ctrl_bridge_stop_read(struct ctrl_bridge *dev) +{ + if (dev->readurb) { + dev_dbg(&dev->udev->dev, "killing rcv urb\n"); + usb_unlink_urb(dev->readurb); + } + + if (dev->inturb) { + dev_dbg(&dev->udev->dev, "killing int urb\n"); + usb_unlink_urb(dev->inturb); + } + + return 0; +} + +int ctrl_bridge_open(struct bridge *brdg) +{ + struct ctrl_bridge *dev; + + if (!brdg) { + err("bridge is null\n"); + return -EINVAL; + } + + if (brdg->ch_id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[brdg->ch_id]; + if (!dev) { + err("dev is null\n"); + return -ENODEV; + } + + dev->brdg = brdg; + dev->snd_encap_cmd = 0; + dev->get_encap_res = 0; + dev->resp_avail = 0; + dev->set_ctrl_line_sts = 0; + dev->notify_ser_state = 0; + + return ctrl_bridge_start_read(dev); +} +EXPORT_SYMBOL(ctrl_bridge_open); + +void ctrl_bridge_close(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + ctrl_bridge_set_cbits(dev->brdg->ch_id, 0); + usb_unlink_anchored_urbs(&dev->tx_submitted); + ctrl_bridge_stop_read(dev); + + dev->brdg = NULL; +} +EXPORT_SYMBOL(ctrl_bridge_close); + +static void ctrl_write_callback(struct urb *urb) +{ + + if (urb->status) { + pr_debug("Write status/size %d/%d\n", + urb->status, urb->actual_length); + } + + kfree(urb->transfer_buffer); + kfree(urb->setup_packet); + usb_free_urb(urb); +} + +int ctrl_bridge_write(unsigned int id, char *data, size_t size) +{ + int result; + struct urb *writeurb; + struct usb_ctrlrequest *out_ctlreq; + struct usb_device *udev; + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) { + result = -EINVAL; + goto free_data; + } + + dev = __dev[id]; + + if (!dev) { + result = -ENODEV; + goto free_data; + } + + udev = interface_to_usbdev(dev->intf); + + dev_dbg(&udev->dev, "%s:[id]:%u: write (%d bytes)\n", + __func__, id, size); + + writeurb = usb_alloc_urb(0, GFP_ATOMIC); + if (!writeurb) { + dev_err(&udev->dev, "%s: error allocating read urb\n", + __func__); + result = -ENOMEM; + goto free_data; + } + + out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_ATOMIC); + if (!out_ctlreq) { + dev_err(&udev->dev, + "%s: error allocating setup packet buffer\n", + __func__); + result = -ENOMEM; + goto free_urb; + } + + 
/* CDC Send Encapsulated Request packet */ + out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | + USB_RECIP_INTERFACE); + if (!data && !size) { + out_ctlreq->bRequest = USB_CDC_REQ_SET_CONTROL_LINE_STATE; + out_ctlreq->wValue = dev->cbits_tomdm; + dev->set_ctrl_line_sts++; + } else { + out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; + out_ctlreq->wValue = 0; + dev->snd_encap_cmd++; + } + out_ctlreq->wIndex = + dev->intf->cur_altsetting->desc.bInterfaceNumber; + out_ctlreq->wLength = cpu_to_le16(size); + + usb_fill_control_urb(writeurb, udev, + usb_sndctrlpipe(udev, 0), + (unsigned char *)out_ctlreq, + (void *)data, size, + ctrl_write_callback, NULL); + + result = usb_autopm_get_interface_async(dev->intf); + if (result < 0) { + dev_err(&udev->dev, "%s: unable to resume interface: %d\n", + __func__, result); + + /* + * Revisit: if (result == -EPERM) + * bridge_suspend(dev->intf, PMSG_SUSPEND); + */ + + goto free_ctrlreq; + } + + usb_anchor_urb(writeurb, &dev->tx_submitted); + result = usb_submit_urb(writeurb, GFP_ATOMIC); + if (result < 0) { + dev_err(&udev->dev, "%s: submit URB error %d\n", + __func__, result); + usb_autopm_put_interface_async(dev->intf); + goto unanchor_urb; + } + + return size; + +unanchor_urb: + usb_unanchor_urb(writeurb); +free_ctrlreq: + kfree(out_ctlreq); +free_urb: + usb_free_urb(writeurb); +free_data: + kfree(data); + + return result; +} +EXPORT_SYMBOL(ctrl_bridge_write); + +int ctrl_bridge_suspend(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + usb_kill_anchored_urbs(&dev->tx_submitted); + + return ctrl_bridge_stop_read(dev); +} + +int ctrl_bridge_resume(unsigned int id) +{ + struct ctrl_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev) + return -ENODEV; + + return ctrl_bridge_start_read(dev); +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t ctrl_bridge_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ctrl_bridge *dev; + char *buf; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName#%s dev %p\n" + "snd encap cmd cnt: %u\n" + "get encap res cnt: %u\n" + "res available cnt: %u\n" + "set ctrlline sts cnt: %u\n" + "notify ser state cnt: %u\n" + "cbits_tomdm: %d\n" + "cbits_tohost: %d\n", + dev->pdev->name, dev, + dev->snd_encap_cmd, + dev->get_encap_res, + dev->resp_avail, + dev->set_ctrl_line_sts, + dev->notify_ser_state, + dev->cbits_tomdm, + dev->cbits_tohost); + + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t ctrl_bridge_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ctrl_bridge *dev; + int i; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + dev->snd_encap_cmd = 0; + dev->get_encap_res = 0; + dev->resp_avail = 0; + dev->set_ctrl_line_sts = 0; + dev->notify_ser_state = 0; + } + return count; +} + +const struct file_operations ctrl_stats_ops = { + .read = ctrl_bridge_read_stats, + .write = ctrl_bridge_reset_stats, +}; + +struct dentry *ctrl_dent; +struct dentry *ctrl_dfile; +static void ctrl_bridge_debugfs_init(void) +{ + ctrl_dent = 
debugfs_create_dir("ctrl_hsic_bridge", 0); + if (IS_ERR(ctrl_dent)) + return; + + ctrl_dfile = + debugfs_create_file("status", 0644, ctrl_dent, 0, + &ctrl_stats_ops); + if (!ctrl_dfile || IS_ERR(ctrl_dfile)) + debugfs_remove(ctrl_dent); +} + +static void ctrl_bridge_debugfs_exit(void) +{ + debugfs_remove(ctrl_dfile); + debugfs_remove(ctrl_dent); +} + +#else +static void ctrl_bridge_debugfs_init(void) { } +static void ctrl_bridge_debugfs_exit(void) { } +#endif + +int +ctrl_bridge_probe(struct usb_interface *ifc, struct usb_host_endpoint *int_in, + int id) +{ + struct ctrl_bridge *dev; + struct usb_device *udev; + struct usb_endpoint_descriptor *ep; + u16 wMaxPacketSize; + int retval = 0; + int interval; + + udev = interface_to_usbdev(ifc); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + dev_err(&udev->dev, "%s: unable to allocate dev\n", + __func__); + return -ENOMEM; + } + dev->pdev = platform_device_alloc(ctrl_bridge_names[id], id); + if (!dev->pdev) { + dev_err(&dev->udev->dev, + "%s: unable to allocate platform device\n", __func__); + retval = -ENOMEM; + goto nomem; + } + + dev->udev = udev; + dev->int_pipe = usb_rcvintpipe(udev, + int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + dev->intf = ifc; + + init_usb_anchor(&dev->tx_submitted); + + /*use max pkt size from ep desc*/ + ep = &dev->intf->cur_altsetting->endpoint[0].desc; + + dev->inturb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->inturb) { + dev_err(&udev->dev, "%s: error allocating int urb\n", __func__); + retval = -ENOMEM; + goto pdev_del; + } + + wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize); + + dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL); + if (!dev->intbuf) { + dev_err(&udev->dev, "%s: error allocating int buffer\n", + __func__); + retval = -ENOMEM; + goto free_inturb; + } + + interval = + (udev->speed == USB_SPEED_HIGH) ? 
HS_INTERVAL : FS_LS_INTERVAL; + + usb_fill_int_urb(dev->inturb, udev, dev->int_pipe, + dev->intbuf, wMaxPacketSize, + notification_available_cb, dev, interval); + + dev->readurb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->readurb) { + dev_err(&udev->dev, "%s: error allocating read urb\n", + __func__); + retval = -ENOMEM; + goto free_intbuf; + } + + dev->readbuf = kmalloc(DEFAULT_READ_URB_LENGTH, GFP_KERNEL); + if (!dev->readbuf) { + dev_err(&udev->dev, "%s: error allocating read buffer\n", + __func__); + retval = -ENOMEM; + goto free_rurb; + } + + dev->in_ctlreq = kmalloc(sizeof(*dev->in_ctlreq), GFP_KERNEL); + if (!dev->in_ctlreq) { + dev_err(&udev->dev, + "%s:error allocating setup packet buffer\n", + __func__); + retval = -ENOMEM; + goto free_rbuf; + } + + dev->in_ctlreq->bRequestType = + (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); + dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; + dev->in_ctlreq->wValue = 0; + dev->in_ctlreq->wIndex = + dev->intf->cur_altsetting->desc.bInterfaceNumber; + dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH); + + __dev[id] = dev; + + platform_device_add(dev->pdev); + + ch_id++; + + return retval; + +free_rbuf: + kfree(dev->readbuf); +free_rurb: + usb_free_urb(dev->readurb); +free_intbuf: + kfree(dev->intbuf); +free_inturb: + usb_free_urb(dev->inturb); +pdev_del: + platform_device_del(dev->pdev); +nomem: + kfree(dev); + + return retval; +} + +void ctrl_bridge_disconnect(unsigned int id) +{ + struct ctrl_bridge *dev = __dev[id]; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + kfree(dev->in_ctlreq); + kfree(dev->readbuf); + kfree(dev->intbuf); + + usb_free_urb(dev->readurb); + usb_free_urb(dev->inturb); + + platform_device_del(dev->pdev); + __dev[id] = NULL; + ch_id--; + + kfree(dev); +} + +static int __init ctrl_bridge_init(void) +{ + ctrl_bridge_debugfs_init(); + + return 0; +} +module_init(ctrl_bridge_init); + +static void __exit ctrl_bridge_exit(void) +{ + ctrl_bridge_debugfs_exit(); +} +module_exit(ctrl_bridge_exit); + +MODULE_DESCRIPTION("Qualcomm modem control bridge driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/misc/mdm_data_bridge.c b/drivers/usb/misc/mdm_data_bridge.c new file mode 100644 index 00000000000..c41fcfb0b67 --- /dev/null +++ b/drivers/usb/misc/mdm_data_bridge.c @@ -0,0 +1,923 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_RX_URBS 50 +#define RMNET_RX_BUFSIZE 2048 + +#define STOP_SUBMIT_URB_LIMIT 400 +#define FLOW_CTRL_EN_THRESHOLD 500 +#define FLOW_CTRL_DISABLE 300 +#define FLOW_CTRL_SUPPORT 1 + +static const char *data_bridge_names[] = { + "dun_data_hsic0", + "rmnet_data_hsic0" +}; + +static struct workqueue_struct *bridge_wq; + +static unsigned int fctrl_support = FLOW_CTRL_SUPPORT; +module_param(fctrl_support, uint, S_IRUGO | S_IWUSR); + +static unsigned int fctrl_en_thld = FLOW_CTRL_EN_THRESHOLD; +module_param(fctrl_en_thld, uint, S_IRUGO | S_IWUSR); + +static unsigned int fctrl_dis_thld = FLOW_CTRL_DISABLE; +module_param(fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); + +unsigned int max_rx_urbs = MAX_RX_URBS; +module_param(max_rx_urbs, uint, S_IRUGO | S_IWUSR); + +unsigned int stop_submit_urb_limit = STOP_SUBMIT_URB_LIMIT; +module_param(stop_submit_urb_limit, uint, S_IRUGO | S_IWUSR); + +#define TX_HALT BIT(0) +#define RX_HALT BIT(1) +#define SUSPENDED BIT(2) + +struct data_bridge { + struct usb_interface *intf; + struct usb_device *udev; + unsigned int bulk_in; + unsigned int bulk_out; + + /* keep track of in-flight URBs */ + struct usb_anchor tx_active; + struct usb_anchor rx_active; + + /* keep track of outgoing URBs during suspend */ + struct usb_anchor delayed; + + struct list_head rx_idle; + struct sk_buff_head rx_done; + + struct workqueue_struct *wq; + struct work_struct process_rx_w; + + struct bridge *brdg; + + /* work queue function for handling halt conditions */ + struct work_struct kevent; + + unsigned long flags; + + struct platform_device *pdev; + + /* counters */ + atomic_t pending_txurbs; + unsigned int txurb_drp_cnt; + unsigned long to_host; + unsigned long to_modem; + unsigned int tx_throttled_cnt; + unsigned int tx_unthrottled_cnt; + unsigned int rx_throttled_cnt; + unsigned int rx_unthrottled_cnt; +}; + +static struct data_bridge *__dev[MAX_BRIDGE_DEVICES]; + +/* counter used for indexing data bridge devices */ +static int ch_id; + +static int submit_rx_urb(struct data_bridge *dev, struct urb *urb, + gfp_t flags); + +static inline bool rx_halted(struct data_bridge *dev) +{ + return test_bit(RX_HALT, &dev->flags); +} + +static inline bool rx_throttled(struct bridge *brdg) +{ + return test_bit(RX_THROTTLED, &brdg->flags); +} + +int data_bridge_unthrottle_rx(unsigned int id) +{ + struct data_bridge *dev; + + if (id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return -ENODEV; + + dev->rx_unthrottled_cnt++; + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} +EXPORT_SYMBOL(data_bridge_unthrottle_rx); + +static void data_bridge_process_rx(struct work_struct *work) +{ + int retval; + unsigned long flags; + struct urb *rx_idle; + struct sk_buff *skb; + struct data_bridge *dev = + container_of(work, struct data_bridge, process_rx_w); + + struct bridge *brdg = dev->brdg; + + if (!brdg || !brdg->ops.send_pkt || rx_halted(dev)) + return; + + while (!rx_throttled(brdg) && (skb = skb_dequeue(&dev->rx_done))) { + dev->to_host++; + /* hand off sk_buff to client,they'll need to free it */ + retval = brdg->ops.send_pkt(brdg->ctx, skb, skb->len); + if (retval == -ENOTCONN || retval == -EINVAL) { + return; + } else if (retval == -EBUSY) { + dev->rx_throttled_cnt++; + break; + } + } + + spin_lock_irqsave(&dev->rx_done.lock, flags); + if (dev->rx_done.qlen > stop_submit_urb_limit && rx_throttled(brdg)) { + 
spin_unlock_irqrestore(&dev->rx_done.lock, flags); + return; + } + + while (!list_empty(&dev->rx_idle)) { + + rx_idle = list_first_entry(&dev->rx_idle, struct urb, urb_list); + list_del(&rx_idle->urb_list); + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + retval = submit_rx_urb(dev, rx_idle, GFP_KERNEL); + spin_lock_irqsave(&dev->rx_done.lock, flags); + if (retval) + break; + } + spin_unlock_irqrestore(&dev->rx_done.lock, flags); +} + +static void data_bridge_read_cb(struct urb *urb) +{ + struct bridge *brdg; + struct sk_buff *skb = urb->context; + struct data_bridge *dev = *(struct data_bridge **)skb->cb; + bool queue = 0; + + brdg = dev->brdg; + + skb_put(skb, urb->actual_length); + + switch (urb->status) { + case 0: /* success */ + queue = 1; + spin_lock(&dev->rx_done.lock); + __skb_queue_tail(&dev->rx_done, skb); + spin_unlock(&dev->rx_done.lock); + break; + + /*do not resubmit*/ + case -EPIPE: + set_bit(RX_HALT, &dev->flags); + dev_err(&dev->udev->dev, "%s: epout halted\n", __func__); + schedule_work(&dev->kevent); + /* FALLTHROUGH */ + case -ESHUTDOWN: + case -ENOENT: /* suspended */ + case -ECONNRESET: /* unplug */ + case -EPROTO: + dev_kfree_skb_any(skb); + break; + + /*resubmit */ + case -EOVERFLOW: /*babble error*/ + default: + queue = 1; + dev_kfree_skb_any(skb); + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + break; + } + + spin_lock(&dev->rx_done.lock); + list_add_tail(&urb->urb_list, &dev->rx_idle); + spin_unlock(&dev->rx_done.lock); + + if (queue) + queue_work(dev->wq, &dev->process_rx_w); +} + +static int submit_rx_urb(struct data_bridge *dev, struct urb *rx_urb, + gfp_t flags) +{ + struct sk_buff *skb; + int retval = -EINVAL; + + skb = alloc_skb(RMNET_RX_BUFSIZE, flags); + if (!skb) { + usb_free_urb(rx_urb); + return -ENOMEM; + } + + *((struct data_bridge **)skb->cb) = dev; + + usb_fill_bulk_urb(rx_urb, dev->udev, dev->bulk_in, + skb->data, RMNET_RX_BUFSIZE, + data_bridge_read_cb, skb); + + if (test_bit(SUSPENDED, &dev->flags)) + goto suspended; + + usb_anchor_urb(rx_urb, &dev->rx_active); + retval = usb_submit_urb(rx_urb, flags); + if (retval) + goto fail; + + return 0; +fail: + usb_unanchor_urb(rx_urb); +suspended: + dev_kfree_skb_any(skb); + usb_free_urb(rx_urb); + return retval; +} + +static int data_bridge_prepare_rx(struct data_bridge *dev) +{ + int i; + struct urb *rx_urb; + + for (i = 0; i < max_rx_urbs; i++) { + rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!rx_urb) + return -ENOMEM; + + list_add_tail(&rx_urb->urb_list, &dev->rx_idle); + } + return 0; +} + +int data_bridge_open(struct bridge *brdg) +{ + struct data_bridge *dev; + + if (!brdg) { + err("bridge is null\n"); + return -EINVAL; + } + + if (brdg->ch_id >= MAX_BRIDGE_DEVICES) + return -EINVAL; + + dev = __dev[brdg->ch_id]; + if (!dev) { + err("dev is null\n"); + return -ENODEV; + } + + dev_dbg(&dev->udev->dev, "%s: dev:%p\n", __func__, dev); + + dev->brdg = brdg; + atomic_set(&dev->pending_txurbs, 0); + dev->to_host = 0; + dev->to_modem = 0; + dev->txurb_drp_cnt = 0; + dev->tx_throttled_cnt = 0; + dev->tx_unthrottled_cnt = 0; + dev->rx_throttled_cnt = 0; + dev->rx_unthrottled_cnt = 0; + + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} +EXPORT_SYMBOL(data_bridge_open); + +void data_bridge_close(unsigned int id) +{ + struct data_bridge *dev; + struct sk_buff *skb; + unsigned long flags; + + if (id >= MAX_BRIDGE_DEVICES) + return; + + dev = __dev[id]; + if (!dev && !dev->brdg) + return; + + dev_dbg(&dev->udev->dev, "%s:\n", __func__); + + 
usb_unlink_anchored_urbs(&dev->tx_active); + usb_unlink_anchored_urbs(&dev->rx_active); + usb_unlink_anchored_urbs(&dev->delayed); + + spin_lock_irqsave(&dev->rx_done.lock, flags); + while ((skb = __skb_dequeue(&dev->rx_done))) + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + + dev->brdg = NULL; +} +EXPORT_SYMBOL(data_bridge_close); + +static void defer_kevent(struct work_struct *work) +{ + int status; + struct data_bridge *dev = + container_of(work, struct data_bridge, kevent); + + if (!dev) + return; + + if (test_bit(TX_HALT, &dev->flags)) { + usb_unlink_anchored_urbs(&dev->tx_active); + + status = usb_autopm_get_interface(dev->intf); + if (status < 0) { + dev_err(&dev->udev->dev, + "can't acquire interface, status %d\n", status); + return; + } + + status = usb_clear_halt(dev->udev, dev->bulk_out); + usb_autopm_put_interface(dev->intf); + if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) + dev_err(&dev->udev->dev, + "can't clear tx halt, status %d\n", status); + else + clear_bit(TX_HALT, &dev->flags); + } + + if (test_bit(RX_HALT, &dev->flags)) { + usb_unlink_anchored_urbs(&dev->rx_active); + + status = usb_autopm_get_interface(dev->intf); + if (status < 0) { + dev_err(&dev->udev->dev, + "can't acquire interface, status %d\n", status); + return; + } + + status = usb_clear_halt(dev->udev, dev->bulk_in); + usb_autopm_put_interface(dev->intf); + if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) + dev_err(&dev->udev->dev, + "can't clear rx halt, status %d\n", status); + else { + clear_bit(RX_HALT, &dev->flags); + if (dev->brdg) + queue_work(dev->wq, &dev->process_rx_w); + } + } +} + +static void data_bridge_write_cb(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct data_bridge *dev = *(struct data_bridge **)skb->cb; + struct bridge *brdg = dev->brdg; + int pending; + + pr_debug("%s: dev:%p\n", __func__, dev); + + switch (urb->status) { + case 0: /*success*/ + break; + case -EPIPE: + set_bit(TX_HALT, &dev->flags); + dev_err(&dev->udev->dev, "%s: epout halted\n", __func__); + schedule_work(&dev->kevent); + /* FALLTHROUGH */ + case -ESHUTDOWN: + case -ENOENT: /* suspended */ + case -ECONNRESET: /* unplug */ + case -EOVERFLOW: /*babble error*/ + /* FALLTHROUGH */ + default: + pr_debug_ratelimited("%s: non zero urb status = %d\n", + __func__, urb->status); + } + + usb_free_urb(urb); + dev_kfree_skb_any(skb); + + pending = atomic_dec_return(&dev->pending_txurbs); + + /*flow ctrl*/ + if (brdg && fctrl_support && pending <= fctrl_dis_thld && + test_and_clear_bit(TX_THROTTLED, &brdg->flags)) { + pr_debug_ratelimited("%s: disable flow ctrl: pend urbs:%u\n", + __func__, pending); + dev->tx_unthrottled_cnt++; + if (brdg->ops.unthrottle_tx) + brdg->ops.unthrottle_tx(brdg->ctx); + } + + usb_autopm_put_interface_async(dev->intf); +} + +int data_bridge_write(unsigned int id, struct sk_buff *skb) +{ + int result; + int size = skb->len; + int pending; + struct urb *txurb; + struct data_bridge *dev = __dev[id]; + struct bridge *brdg; + + if (!dev || !dev->brdg || !usb_get_intfdata(dev->intf)) + return -ENODEV; + + brdg = dev->brdg; + + dev_dbg(&dev->udev->dev, "%s: write (%d bytes)\n", __func__, skb->len); + + result = usb_autopm_get_interface(dev->intf); + if (result < 0) { + dev_err(&dev->udev->dev, "%s: resume failure\n", __func__); + goto error; + } + + txurb = usb_alloc_urb(0, GFP_KERNEL); + if (!txurb) { + dev_err(&dev->udev->dev, "%s: error allocating read urb\n", + __func__); + result = -ENOMEM; + goto error; + } + + /* store dev 
pointer in skb */ + *((struct data_bridge **)skb->cb) = dev; + + usb_fill_bulk_urb(txurb, dev->udev, dev->bulk_out, + skb->data, skb->len, data_bridge_write_cb, skb); + + if (test_bit(SUSPENDED, &dev->flags)) { + usb_anchor_urb(txurb, &dev->delayed); + goto free_urb; + } + + pending = atomic_inc_return(&dev->pending_txurbs); + usb_anchor_urb(txurb, &dev->tx_active); + + result = usb_submit_urb(txurb, GFP_KERNEL); + if (result < 0) { + usb_unanchor_urb(txurb); + atomic_dec(&dev->pending_txurbs); + dev_err(&dev->udev->dev, "%s: submit URB error %d\n", + __func__, result); + goto free_urb; + } + + dev->to_modem++; + dev_dbg(&dev->udev->dev, "%s: pending_txurbs: %u\n", __func__, pending); + + /* flow control: last urb submitted but return -EBUSY */ + if (fctrl_support && pending > fctrl_en_thld) { + set_bit(TX_THROTTLED, &brdg->flags); + dev->tx_throttled_cnt++; + pr_debug_ratelimited("%s: enable flow ctrl pend txurbs:%u\n", + __func__, pending); + return -EBUSY; + } + + return size; + +free_urb: + usb_free_urb(txurb); +error: + dev->txurb_drp_cnt++; + usb_autopm_put_interface(dev->intf); + + return result; +} +EXPORT_SYMBOL(data_bridge_write); + +static int data_bridge_resume(struct data_bridge *dev) +{ + struct urb *urb; + int retval; + + while ((urb = usb_get_from_anchor(&dev->delayed))) { + usb_anchor_urb(urb, &dev->tx_active); + atomic_inc(&dev->pending_txurbs); + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval < 0) { + atomic_dec(&dev->pending_txurbs); + usb_unanchor_urb(urb); + + /* TODO: need to free urb data */ + usb_scuttle_anchored_urbs(&dev->delayed); + break; + } + dev->to_modem++; + dev->txurb_drp_cnt--; + } + + clear_bit(SUSPENDED, &dev->flags); + + if (dev->brdg) + queue_work(dev->wq, &dev->process_rx_w); + + return 0; +} + +static int bridge_resume(struct usb_interface *iface) +{ + int retval = 0; + int oldstate; + struct data_bridge *dev = usb_get_intfdata(iface); + struct bridge *brdg = dev->brdg; + + oldstate = iface->dev.power.power_state.event; + iface->dev.power.power_state.event = PM_EVENT_ON; + + retval = data_bridge_resume(dev); + if (!retval) { + if (oldstate & PM_EVENT_SUSPEND && brdg) + retval = ctrl_bridge_resume(brdg->ch_id); + } + return retval; +} + +static int data_bridge_suspend(struct data_bridge *dev, pm_message_t message) +{ + if (atomic_read(&dev->pending_txurbs) && + (message.event & PM_EVENT_AUTO)) + return -EBUSY; + + set_bit(SUSPENDED, &dev->flags); + + usb_kill_anchored_urbs(&dev->tx_active); + usb_kill_anchored_urbs(&dev->rx_active); + + return 0; +} + +static int bridge_suspend(struct usb_interface *intf, pm_message_t message) +{ + int retval; + struct data_bridge *dev = usb_get_intfdata(intf); + struct bridge *brdg = dev->brdg; + + retval = data_bridge_suspend(dev, message); + if (!retval) { + if (message.event & PM_EVENT_SUSPEND) { + if (brdg) + retval = ctrl_bridge_suspend(brdg->ch_id); + intf->dev.power.power_state.event = message.event; + } + } else { + dev_dbg(&dev->udev->dev, "%s: device is busy,cannot suspend\n", + __func__); + } + return retval; +} + +static int data_bridge_probe(struct usb_interface *iface, + struct usb_host_endpoint *bulk_in, + struct usb_host_endpoint *bulk_out, int id) +{ + struct data_bridge *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + err("%s: unable to allocate dev\n", __func__); + return -ENOMEM; + } + + dev->pdev = platform_device_alloc(data_bridge_names[id], id); + if (!dev->pdev) { + err("%s: unable to allocate platform device\n", __func__); + kfree(dev); + return -ENOMEM; + } + + 
init_usb_anchor(&dev->tx_active); + init_usb_anchor(&dev->rx_active); + init_usb_anchor(&dev->delayed); + + INIT_LIST_HEAD(&dev->rx_idle); + skb_queue_head_init(&dev->rx_done); + + dev->wq = bridge_wq; + + dev->udev = interface_to_usbdev(iface); + dev->intf = iface; + + dev->bulk_in = usb_rcvbulkpipe(dev->udev, + bulk_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + + dev->bulk_out = usb_sndbulkpipe(dev->udev, + bulk_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + + usb_set_intfdata(iface, dev); + + INIT_WORK(&dev->kevent, defer_kevent); + INIT_WORK(&dev->process_rx_w, data_bridge_process_rx); + + __dev[id] = dev; + + /*allocate list of rx urbs*/ + data_bridge_prepare_rx(dev); + + platform_device_add(dev->pdev); + + return 0; +} + +#if defined(CONFIG_DEBUG_FS) +#define DEBUG_BUF_SIZE 1024 +static ssize_t data_bridge_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct data_bridge *dev; + char *buf; + int ret; + int i; + int temp = 0; + + buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp, + "\nName#%s dev %p\n" + "pending tx urbs: %u\n" + "tx urb drp cnt: %u\n" + "to host: %lu\n" + "to mdm: %lu\n" + "tx throttled cnt: %u\n" + "tx unthrottled cnt: %u\n" + "rx throttled cnt: %u\n" + "rx unthrottled cnt: %u\n" + "rx done skb qlen: %u\n" + "suspended: %d\n" + "TX_HALT: %d\n" + "RX_HALT: %d\n", + dev->pdev->name, dev, + atomic_read(&dev->pending_txurbs), + dev->txurb_drp_cnt, + dev->to_host, + dev->to_modem, + dev->tx_throttled_cnt, + dev->tx_unthrottled_cnt, + dev->rx_throttled_cnt, + dev->rx_unthrottled_cnt, + dev->rx_done.qlen, + test_bit(SUSPENDED, &dev->flags), + test_bit(TX_HALT, &dev->flags), + test_bit(RX_HALT, &dev->flags)); + + } + + ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); + + kfree(buf); + + return ret; +} + +static ssize_t data_bridge_reset_stats(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct data_bridge *dev; + int i; + + for (i = 0; i < ch_id; i++) { + dev = __dev[i]; + if (!dev) + continue; + + dev->to_host = 0; + dev->to_modem = 0; + dev->txurb_drp_cnt = 0; + dev->tx_throttled_cnt = 0; + dev->tx_unthrottled_cnt = 0; + dev->rx_throttled_cnt = 0; + dev->rx_unthrottled_cnt = 0; + } + return count; +} + +const struct file_operations data_stats_ops = { + .read = data_bridge_read_stats, + .write = data_bridge_reset_stats, +}; + +struct dentry *data_dent; +struct dentry *data_dfile; +static void data_bridge_debugfs_init(void) +{ + data_dent = debugfs_create_dir("data_hsic_bridge", 0); + if (IS_ERR(data_dent)) + return; + + data_dfile = debugfs_create_file("status", 0644, data_dent, 0, + &data_stats_ops); + if (!data_dfile || IS_ERR(data_dfile)) + debugfs_remove(data_dent); +} + +static void data_bridge_debugfs_exit(void) +{ + debugfs_remove(data_dfile); + debugfs_remove(data_dent); +} + +#else +static void data_bridge_debugfs_init(void) { } +static void data_bridge_debugfs_exit(void) { } +#endif + +static int __devinit +bridge_probe(struct usb_interface *iface, const struct usb_device_id *id) +{ + struct usb_host_endpoint *endpoint = NULL; + struct usb_host_endpoint *bulk_in = NULL; + struct usb_host_endpoint *bulk_out = NULL; + struct usb_host_endpoint *int_in = NULL; + struct usb_device *udev; + int i; + int status = 0; + int numends; + int iface_num; + + iface_num = 
iface->cur_altsetting->desc.bInterfaceNumber; + + if (iface->num_altsetting != 1) { + err("%s invalid num_altsetting %u\n", + __func__, iface->num_altsetting); + return -EINVAL; + } + + udev = interface_to_usbdev(iface); + usb_get_dev(udev); + + if (iface_num != DUN_IFACE_NUM && iface_num != TETHERED_RMNET_IFACE_NUM) + return 0; + + numends = iface->cur_altsetting->desc.bNumEndpoints; + for (i = 0; i < numends; i++) { + endpoint = iface->cur_altsetting->endpoint + i; + if (!endpoint) { + dev_err(&udev->dev, "%s: invalid endpoint %u\n", + __func__, i); + status = -EINVAL; + goto out; + } + + if (usb_endpoint_is_bulk_in(&endpoint->desc)) + bulk_in = endpoint; + else if (usb_endpoint_is_bulk_out(&endpoint->desc)) + bulk_out = endpoint; + else if (usb_endpoint_is_int_in(&endpoint->desc)) + int_in = endpoint; + } + + if (!bulk_in || !bulk_out || !int_in) { + dev_err(&udev->dev, "%s: invalid endpoints\n", __func__); + status = -EINVAL; + goto out; + } + + status = data_bridge_probe(iface, bulk_in, bulk_out, ch_id); + if (status < 0) { + dev_err(&udev->dev, "data_bridge_probe failed %d\n", status); + goto out; + } + + status = ctrl_bridge_probe(iface, int_in, ch_id); + if (status < 0) { + dev_err(&udev->dev, "ctrl_bridge_probe failed %d\n", status); + goto free_data_bridge; + } + ch_id++; + + return 0; + +free_data_bridge: + platform_device_del(__dev[ch_id]->pdev); + usb_set_intfdata(iface, NULL); + kfree(__dev[ch_id]); + __dev[ch_id] = NULL; +out: + usb_put_dev(udev); + + return status; +} + +static void bridge_disconnect(struct usb_interface *intf) +{ + struct data_bridge *dev = usb_get_intfdata(intf); + struct list_head *head; + struct urb *rx_urb; + unsigned long flags; + int iface_num; + + if (!dev) { + err("%s: data device not found\n", __func__); + return; + } + + iface_num = intf->cur_altsetting->desc.bInterfaceNumber; + if (iface_num != DUN_IFACE_NUM && iface_num != TETHERED_RMNET_IFACE_NUM) + return; + + ch_id--; + ctrl_bridge_disconnect(ch_id); + platform_device_del(dev->pdev); + usb_set_intfdata(intf, NULL); + __dev[ch_id] = NULL; + + cancel_work_sync(&dev->process_rx_w); + cancel_work_sync(&dev->kevent); + + /*free rx urbs*/ + head = &dev->rx_idle; + spin_lock_irqsave(&dev->rx_done.lock, flags); + while (!list_empty(head)) { + rx_urb = list_entry(head->next, struct urb, urb_list); + list_del(&rx_urb->urb_list); + usb_free_urb(rx_urb); + } + spin_unlock_irqrestore(&dev->rx_done.lock, flags); + + usb_put_dev(dev->udev); + kfree(dev); +} + +static const struct usb_device_id bridge_ids[] = { + { USB_DEVICE(0x5c6, 0x9001) }, +}; + +MODULE_DEVICE_TABLE(usb, bridge_ids); + +static struct usb_driver bridge_driver = { + .name = "mdm_bridge", + .probe = bridge_probe, + .disconnect = bridge_disconnect, + .id_table = bridge_ids, + .suspend = bridge_suspend, + .resume = bridge_resume, + .supports_autosuspend = 1, +}; + +static int __init bridge_init(void) +{ + int ret; + + ret = usb_register(&bridge_driver); + if (ret) { + err("%s: unable to register mdm_bridge driver", __func__); + return ret; + } + + bridge_wq = create_singlethread_workqueue("mdm_bridge"); + if (!bridge_wq) { + usb_deregister(&bridge_driver); + pr_err("%s: Unable to create workqueue:bridge\n", __func__); + return -ENOMEM; + } + + data_bridge_debugfs_init(); + + return 0; +} + +static void __exit bridge_exit(void) +{ + data_bridge_debugfs_exit(); + destroy_workqueue(bridge_wq); + usb_deregister(&bridge_driver); +} + +module_init(bridge_init); +module_exit(bridge_exit); + +MODULE_DESCRIPTION("Qualcomm modem data bridge driver"); 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index 417b8f207e8..59689fa2f7c 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -24,7 +24,7 @@ #define VENDOR_ID 0x0fc5 #define PRODUCT_ID 0x1227 -#define MAXLEN 6 +#define MAXLEN 8 /* table of devices that work with this driver */ static const struct usb_device_id id_table[] = { diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index bb10846affc..5707f56d804 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1023,7 +1023,10 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param) case 13: /* short read, resembling case 10 */ req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0); /* last data packet "should" be DATA1, not DATA0 */ - len = 1024 - udev->descriptor.bMaxPacketSize0; + if (udev->speed == USB_SPEED_SUPER) + len = 1024 - 512; + else + len = 1024 - udev->descriptor.bMaxPacketSize0; expected = -EREMOTEIO; break; case 14: /* short read; try to fill the last packet */ @@ -1382,11 +1385,15 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb) static int halt_simple(struct usbtest_dev *dev) { - int ep; - int retval = 0; - struct urb *urb; + int ep; + int retval = 0; + struct urb *urb; + struct usb_device *udev = testdev_to_usbdev(dev); - urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512); + if (udev->speed == USB_SPEED_SUPER) + urb = simple_alloc_urb(udev, 0, 1024); + else + urb = simple_alloc_urb(udev, 0, 512); if (urb == NULL) return -ENOMEM; diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index ac5bfd619e6..2504694455f 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref) usb_put_dev(dev->udev); if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); - if (dev->cntl_req) - usb_free_coherent(dev->udev, YUREX_BUF_SIZE, - dev->cntl_req, dev->cntl_urb->setup_dma); + kfree(dev->cntl_req); if (dev->cntl_buffer) usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->cntl_buffer, dev->cntl_urb->transfer_dma); @@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ } /* allocate buffer for control req */ - dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, - GFP_KERNEL, - &dev->cntl_urb->setup_dma); + dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL); if (!dev->cntl_req) { err("Could not allocate cntl_req"); goto error; @@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, dev, 1); - dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (usb_submit_urb(dev->urb, GFP_KERNEL)) { retval = -EIO; err("Could not submitting URB"); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index dce7182e1df..a0232a77c05 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2078,8 +2078,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status < 0) goto fail3; - pm_runtime_put(musb->controller); - status = musb_init_debugfs(musb); if (status < 0) goto fail4; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 548338c2147..99ceaef2332 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -576,6 +576,15 @@ void 
musb_g_tx(struct musb *musb, u8 epnum) if (request->actual == request->length) { musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); req = musb_ep->desc ? next_request(musb_ep) : NULL; if (!req) { dev_dbg(musb->controller, "%s idle now\n", @@ -968,6 +977,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) } #endif musb_g_giveback(musb_ep, request, 0); + /* + * In the giveback function the MUSB lock is + * released and acquired after sometime. During + * this time period the INDEX register could get + * changed by the gadget_queue function especially + * on SMP systems. Reselect the INDEX to be sure + * we are reading/modifying the right registers + */ + musb_ep_select(mbase, epnum); req = next_request(musb_ep); if (!req) diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index c5d4c44d0ff..6958ab9b99b 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -295,7 +295,8 @@ static int musb_otg_notifications(struct notifier_block *nb, static int omap2430_musb_init(struct musb *musb) { - u32 l, status = 0; + u32 l; + int status = 0; struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; @@ -312,7 +313,7 @@ static int omap2430_musb_init(struct musb *musb) status = pm_runtime_get_sync(dev); if (status < 0) { - dev_err(dev, "pm_runtime_get_sync FAILED"); + dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); goto err1; } @@ -464,14 +465,14 @@ static int __init omap2430_probe(struct platform_device *pdev) goto err2; } + pm_runtime_enable(&pdev->dev); + ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err2; } - pm_runtime_enable(&pdev->dev); - return 0; err2: diff --git a/drivers/usb/otg/otg-wakelock.c b/drivers/usb/otg/otg-wakelock.c new file mode 100644 index 00000000000..f4429b06e14 --- /dev/null +++ b/drivers/usb/otg/otg-wakelock.c @@ -0,0 +1,170 @@ +/* + * otg-wakelock.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#define TEMPORARY_HOLD_TIME 2000 + +static bool enabled = true; +static struct otg_transceiver *otgwl_xceiv; +static struct notifier_block otgwl_nb; + +/* + * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the + * held field is updated to match. + */ + +static DEFINE_SPINLOCK(otgwl_spinlock); + +/* + * Only one lock, but since these 3 fields are associated with each other... + */ + +struct otgwl_lock { + char name[40]; + struct wake_lock wakelock; + bool held; +}; + +/* + * VBUS present lock. 
Also used as a timed lock on charger + * connect/disconnect and USB host disconnect, to allow the system + * to react to the change in power. + */ + +static struct otgwl_lock vbus_lock; + +static void otgwl_hold(struct otgwl_lock *lock) +{ + if (!lock->held) { + wake_lock(&lock->wakelock); + lock->held = true; + } +} + +static void otgwl_temporary_hold(struct otgwl_lock *lock) +{ + wake_lock_timeout(&lock->wakelock, + msecs_to_jiffies(TEMPORARY_HOLD_TIME)); + lock->held = false; +} + +static void otgwl_drop(struct otgwl_lock *lock) +{ + if (lock->held) { + wake_unlock(&lock->wakelock); + lock->held = false; + } +} + +static void otgwl_handle_event(unsigned long event) +{ + unsigned long irqflags; + + spin_lock_irqsave(&otgwl_spinlock, irqflags); + + if (!enabled) { + otgwl_drop(&vbus_lock); + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); + return; + } + + switch (event) { + case USB_EVENT_VBUS: + case USB_EVENT_ENUMERATED: + otgwl_hold(&vbus_lock); + break; + + case USB_EVENT_NONE: + case USB_EVENT_ID: + case USB_EVENT_CHARGER: + otgwl_temporary_hold(&vbus_lock); + break; + + default: + break; + } + + spin_unlock_irqrestore(&otgwl_spinlock, irqflags); +} + +static int otgwl_otg_notifications(struct notifier_block *nb, + unsigned long event, void *unused) +{ + otgwl_handle_event(event); + return NOTIFY_OK; +} + +static int set_enabled(const char *val, const struct kernel_param *kp) +{ + int rv = param_set_bool(val, kp); + + if (rv) + return rv; + + if (otgwl_xceiv) + otgwl_handle_event(otgwl_xceiv->last_event); + + return 0; +} + +static struct kernel_param_ops enabled_param_ops = { + .set = set_enabled, + .get = param_get_bool, +}; + +module_param_cb(enabled, &enabled_param_ops, &enabled, 0644); +MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present"); + +static int __init otg_wakelock_init(void) +{ + int ret; + + otgwl_xceiv = otg_get_transceiver(); + + if (!otgwl_xceiv) { + pr_err("%s: No OTG transceiver found\n", __func__); + return -ENODEV; + } + + snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s", + dev_name(otgwl_xceiv->dev)); + wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND, + vbus_lock.name); + + otgwl_nb.notifier_call = otgwl_otg_notifications; + ret = otg_register_notifier(otgwl_xceiv, &otgwl_nb); + + if (ret) { + pr_err("%s: otg_register_notifier on transceiver %s" + " failed\n", __func__, + dev_name(otgwl_xceiv->dev)); + otgwl_xceiv = NULL; + wake_lock_destroy(&vbus_lock.wakelock); + return ret; + } + + otgwl_handle_event(otgwl_xceiv->last_event); + return ret; +} + +late_initcall(otg_wakelock_init); + diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fd67cc53545..913a178c17a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *port); static void cp210x_get_termios_port(struct usb_serial_port *port, unsigned int *cflagp, unsigned int *baudp); +static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, + struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, struct ktermios*); static int cp210x_tiocmget(struct tty_struct *); @@ -47,6 +49,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int, unsigned int); static void cp210x_break_ctl(struct tty_struct *, int); static int cp210x_startup(struct usb_serial *); +static void cp210x_release(struct usb_serial *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); 
static int debug; @@ -79,6 +82,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ + { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */ { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ @@ -87,11 +91,14 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ + { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ + { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ + { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ @@ -114,10 +121,13 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ @@ -127,22 +137,58 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ + { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ + { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */ + { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ + { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ + { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power 
Supply */ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ + { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ + { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ + { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ + { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ + { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ + { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */ + { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */ + { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */ + { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */ + { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */ + { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */ + { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */ + { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */ + { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */ + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ + { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ + { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(usb, id_table); +struct cp210x_port_private { + __u8 bInterfaceNumber; +}; + static struct usb_driver cp210x_driver = { .name = "cp210x", .probe = usb_serial_probe, @@ -168,6 +214,7 @@ static struct usb_serial_driver cp210x_device = { .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_startup, + .release = cp210x_release, .dtr_rts = cp210x_dtr_rts }; @@ -200,6 +247,8 @@ static struct usb_serial_driver cp210x_device = { #define CP210X_EMBED_EVENTS 0x15 #define CP210X_GET_EVENTSTATE 0x16 #define CP210X_SET_CHARS 0x19 +#define CP210X_GET_BAUDRATE 0x1D +#define CP210X_SET_BAUDRATE 0x1E /* CP210X_IFC_ENABLE */ #define UART_ENABLE 0x0001 @@ -253,6 +302,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; @@ -268,7 +318,8 @@ static int cp210x_get_config(struct 
usb_serial_port *port, u8 request, /* Issue the request, attempting to read 'size' bytes */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), request, REQTYPE_DEVICE_TO_HOST, 0x0000, - 0, buf, size, 300); + port_priv->bInterfaceNumber, buf, size, + USB_CTRL_GET_TIMEOUT); /* Convert data into an array of integers */ for (i = 0; i < length; i++) @@ -296,6 +347,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; @@ -317,12 +369,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, 0x0000, - 0, buf, size, 300); + port_priv->bInterfaceNumber, buf, size, + USB_CTRL_SET_TIMEOUT); } else { result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, data[0], - 0, NULL, 0, 300); + port_priv->bInterfaceNumber, NULL, 0, + USB_CTRL_SET_TIMEOUT); } kfree(buf); @@ -353,8 +407,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port, * Quantises the baud rate as per AN205 Table 1 */ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { - if (baud <= 56) baud = 0; - else if (baud <= 300) baud = 300; + if (baud <= 300) + baud = 300; else if (baud <= 600) baud = 600; else if (baud <= 1200) baud = 1200; else if (baud <= 1800) baud = 1800; @@ -382,17 +436,15 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { else if (baud <= 491520) baud = 460800; else if (baud <= 567138) baud = 500000; else if (baud <= 670254) baud = 576000; - else if (baud <= 1053257) baud = 921600; - else if (baud <= 1474560) baud = 1228800; - else if (baud <= 2457600) baud = 1843200; - else baud = 3686400; + else if (baud < 1000000) + baud = 921600; + else if (baud > 2000000) + baud = 2000000; return baud; } static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) { - int result; - dbg("%s - port %d", __func__, port->number); if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) { @@ -401,13 +453,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) return -EPROTO; } - result = usb_serial_generic_open(tty, port); - if (result) - return result; - /* Configure the termios structure */ cp210x_get_termios(tty, port); - return 0; + + /* The baud rate must be initialised on cp2104 */ + if (tty) + cp210x_change_speed(tty, port, NULL); + + return usb_serial_generic_open(tty, port); } static void cp210x_close(struct usb_serial_port *port) @@ -459,10 +512,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, dbg("%s - port %d", __func__, port->number); - cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2); - /* Convert to baudrate */ - if (baud) - baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud); + cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4); dbg("%s - baud rate = %d", __func__, baud); *baudp = baud; @@ -576,11 +626,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, *cflagp = cflag; } +/* + * CP2101 supports the following baud rates: + * + * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800, + * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600 + * + * CP2102 and CP2103 support the following additional rates: + * + * 4000, 16000, 51200, 64000, 76800, 
153600, 250000, 256000, 500000, + * 576000 + * + * The device will map a requested rate to a supported one, but the result + * of requests for rates greater than 1053257 is undefined (see AN205). + * + * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud, + * respectively, with an error less than 1%. The actual rates are determined + * by + * + * div = round(freq / (2 x prescale x request)) + * actual = freq / (2 x prescale x div) + * + * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps + * or 1 otherwise. + * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1 + * otherwise. + */ +static void cp210x_change_speed(struct tty_struct *tty, + struct usb_serial_port *port, struct ktermios *old_termios) +{ + u32 baud; + + baud = tty->termios->c_ospeed; + + /* This maps the requested rate to a rate valid on cp2102 or cp2103, + * or to an arbitrary rate in [1M,2M]. + * + * NOTE: B0 is not implemented. + */ + baud = cp210x_quantise_baudrate(baud); + + dbg("%s - setting baud rate to %u", __func__, baud); + if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud, + sizeof(baud))) { + dev_warn(&port->dev, "failed to set baud rate to %u\n", baud); + if (old_termios) + baud = old_termios->c_ospeed; + else + baud = 9600; + } + + tty_encode_baud_rate(tty, baud, baud); +} + static void cp210x_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { unsigned int cflag, old_cflag; - unsigned int baud = 0, bits; + unsigned int bits; unsigned int modem_ctl[4]; dbg("%s - port %d", __func__, port->number); @@ -591,20 +694,9 @@ static void cp210x_set_termios(struct tty_struct *tty, tty->termios->c_cflag &= ~CMSPAR; cflag = tty->termios->c_cflag; old_cflag = old_termios->c_cflag; - baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty)); - - /* If the baud rate is to be updated*/ - if (baud != tty_termios_baud_rate(old_termios) && baud != 0) { - dbg("%s - Setting baud rate to %d baud", __func__, - baud); - if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV, - ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { - dbg("Baud rate requested not supported by device"); - baud = tty_termios_baud_rate(old_termios); - } - } - /* Report back the resulting baud rate */ - tty_encode_baud_rate(tty, baud, baud); + + if (tty->termios->c_ospeed != old_termios->c_ospeed) + cp210x_change_speed(tty, port, old_termios); /* If the number of data bits is to be updated */ if ((cflag & CSIZE) != (old_cflag & CSIZE)) { @@ -784,11 +876,39 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state) static int cp210x_startup(struct usb_serial *serial) { + struct cp210x_port_private *port_priv; + int i; + /* cp210x buffers behave strangely unless device is reset */ usb_reset_device(serial->dev); + + for (i = 0; i < serial->num_ports; i++) { + port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); + if (!port_priv) + return -ENOMEM; + + memset(port_priv, 0x00, sizeof(*port_priv)); + port_priv->bInterfaceNumber = + serial->interface->cur_altsetting->desc.bInterfaceNumber; + + usb_set_serial_port_data(serial->port[i], port_priv); + } + return 0; } +static void cp210x_release(struct usb_serial *serial) +{ + struct cp210x_port_private *port_priv; + int i; + + for (i = 0; i < serial->num_ports; i++) { + port_priv = usb_get_serial_port_data(serial->port[i]); + kfree(port_priv); + usb_set_serial_port_data(serial->port[i], NULL); + } +} + static int __init cp210x_init(void) { int retval; diff --git a/drivers/usb/serial/ftdi_sio.c 
b/drivers/usb/serial/ftdi_sio.c index b02fd5027cc..7639cba7909 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -187,6 +187,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, @@ -194,6 +195,10 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -535,6 +540,10 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) }, { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) }, + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, @@ -577,9 +586,12 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* * ELV devices: */ + { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) }, @@ -637,6 +649,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, + { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, @@ -666,6 +679,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) }, @@ -697,6 +711,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, @@ -730,6 +745,7 @@ static struct usb_device_id id_table_combined [] = { { 
USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, @@ -796,14 +812,38 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0x00) }, { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, + { USB_DEVICE(FTDI_VID, PI_C865_PID) }, + { USB_DEVICE(FTDI_VID, PI_C857_PID) }, + { USB_DEVICE(PI_VID, PI_C866_PID) }, + { USB_DEVICE(PI_VID, PI_C663_PID) }, + { USB_DEVICE(PI_VID, PI_C725_PID) }, + { USB_DEVICE(PI_VID, PI_E517_PID) }, + { USB_DEVICE(PI_VID, PI_C863_PID) }, + { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(PI_VID, PI_C867_PID) }, + { USB_DEVICE(PI_VID, PI_E609_PID) }, + { USB_DEVICE(PI_VID, PI_E709_PID) }, + { USB_DEVICE(PI_VID, PI_100F_PID) }, + { USB_DEVICE(PI_VID, PI_1011_PID) }, + { USB_DEVICE(PI_VID, PI_1012_PID) }, + { USB_DEVICE(PI_VID, PI_1013_PID) }, + { USB_DEVICE(PI_VID, PI_1014_PID) }, + { USB_DEVICE(PI_VID, PI_1015_PID) }, + { USB_DEVICE(PI_VID, PI_1016_PID) }, + { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, @@ -835,11 +875,20 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), + { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID), .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, + { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, + /* Crucible Devices */ + { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -862,7 +911,8 @@ static const char *ftdi_chip_name[] = { [FT232RL] = "FT232RL", [FT2232H] = "FT2232H", [FT4232H] = "FT4232H", - [FT232H] = "FT232H" + [FT232H] = "FT232H", + [FTX] = "FT-X" }; @@ -1160,7 +1210,8 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, break; case FT232BM: /* FT232BM chip */ case FT2232C: /* FT2232C chip */ - case FT232RL: + case FT232RL: /* FT232RL chip */ + case FTX: /* FT-X series */ 
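		/*
		 * Editorial note: the FT-X series is grouped with the
		 * FT232BM/FT2232C/FT232RL cases because it uses the same
		 * fractional divisor encoding; the shared code below reads
		 * the product id to special-case a few devices and rejects
		 * requests above 3 Mbaud.
		 */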
if (baud <= 3000000) { __u16 product_id = le16_to_cpu( port->serial->dev->descriptor.idProduct); @@ -1326,8 +1377,7 @@ static int set_serial_info(struct tty_struct *tty, goto check_and_exit; } - if ((new_serial.baud_base != priv->baud_base) && - (new_serial.baud_base < 9600)) { + if (new_serial.baud_base != priv->baud_base) { mutex_unlock(&priv->cfg_lock); return -EINVAL; } @@ -1447,10 +1497,14 @@ static void ftdi_determine_type(struct usb_serial_port *port) } else if (version < 0x900) { /* Assume it's an FT232RL */ priv->chip_type = FT232RL; - } else { + } else if (version < 0x1000) { /* Assume it's an FT232H */ priv->chip_type = FT232H; + } else { + /* Assume it's an FT-X series device */ + priv->chip_type = FTX; } + dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); } @@ -1578,7 +1632,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port) priv->chip_type == FT232RL || priv->chip_type == FT2232H || priv->chip_type == FT4232H || - priv->chip_type == FT232H)) { + priv->chip_type == FT232H || + priv->chip_type == FTX)) { retval = device_create_file(&port->dev, &dev_attr_latency_timer); } @@ -1600,7 +1655,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port) priv->chip_type == FT232RL || priv->chip_type == FT2232H || priv->chip_type == FT4232H || - priv->chip_type == FT232H) { + priv->chip_type == FT232H || + priv->chip_type == FTX) { device_remove_file(&port->dev, &dev_attr_latency_timer); } } @@ -1751,7 +1807,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) dbg("%s", __func__); - if (strcmp(udev->manufacturer, "CALAO Systems") == 0) + if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || + (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) return ftdi_jtag_probe(serial); return 0; @@ -1816,6 +1873,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) { + struct ktermios dummy; struct usb_device *dev = port->serial->dev; struct ftdi_private *priv = usb_get_serial_port_data(port); int result; @@ -1834,8 +1892,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) This is same behaviour as serial.c/rs_open() - Kuba */ /* ftdi_set_termios will send usb control messages */ - if (tty) - ftdi_set_termios(tty, port, tty->termios); + if (tty) { + memset(&dummy, 0, sizeof(dummy)); + ftdi_set_termios(tty, port, &dummy); + } /* Start reading from the device */ result = usb_serial_generic_open(tty, port); @@ -2253,6 +2313,7 @@ static int ftdi_tiocmget(struct tty_struct *tty) case FT2232H: case FT4232H: case FT232H: + case FTX: len = 2; break; default: diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 19584faa86f..ed58c6fa8db 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -157,7 +157,8 @@ enum ftdi_chip_type { FT232RL = 5, FT2232H = 6, FT4232H = 7, - FT232H = 8 + FT232H = 8, + FTX = 9, }; enum ftdi_sio_baudrate { diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 055b64ef0bb..5d25e260efc 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -23,12 +23,15 @@ #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ #define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */ +#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */ #define FTDI_SIO_PID 0x8372 /* Product Id SIO 
application of 8U100AX */ #define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ /*** third-party PIDs (using FTDI_VID) ***/ +#define FTDI_LUMEL_PD12_PID 0x6002 + /* * Marvell OpenRD Base, Client * http://www.open-rd.org @@ -39,6 +42,13 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +/* + * Texas Instruments XDS100v2 JTAG / BeagleBone A3 + * http://processors.wiki.ti.com/index.php/XDS100 + * http://beagleboard.org/bone + */ +#define TI_XDS100V2_PID 0xa6d0 + #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ /* US Interface Navigator (http://www.usinterface.com/) */ @@ -64,6 +74,10 @@ #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB #define FTDI_OPENDCC_GBM_PID 0xBFDC +#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD + +/* NZR SEM 16+ USB (http://www.nzr.de) */ +#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */ /* * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) @@ -90,6 +104,8 @@ #define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */ #define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */ +#define FTDI_DISTORTEC_JTAG_LOCK_PICK_PID 0xCFF8 + /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ /* the VID is the standard ftdi vid (FTDI_VID) */ #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ @@ -132,6 +148,11 @@ #define XSENS_CONVERTER_6_PID 0xD38E #define XSENS_CONVERTER_7_PID 0xD38F +/** + * Zolix (www.zolix.com.cb) product ids + */ +#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */ + /* * NDI (www.ndigital.com) product ids */ @@ -189,7 +210,7 @@ /* * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). - * All of these devices use FTDI's vendor ID (0x0403). + * Almost all of these devices use FTDI's vendor ID (0x0403). * Further IDs taken from ELV Windows .inf file. * * The previously included PID for the UO 100 module was incorrect. @@ -197,6 +218,8 @@ * * Armin Laeuger originally sent the PID for the UM 100 module. */ +#define FTDI_ELV_VID 0x1B1F /* ELV AG */ +#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */ @@ -502,6 +525,11 @@ */ #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */ +/* + * TIAO product ids (FTDI_VID) + * http://www.tiaowiki.com/w/Main_Page + */ +#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */ /********************************/ @@ -524,6 +552,19 @@ #define ADI_GNICE_PID 0xF000 #define ADI_GNICEPLUS_PID 0xF001 +/* + * Microchip Technology, Inc. + * + * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are + * used by single function CDC ACM class based firmware demo + * applications. The VID/PID has also been used in firmware + * emulating FTDI serial chips by: + * Hornby Elite - Digital Command Control Console + * http://www.hornby.com/hornby-dcc/controllers/ + */ +#define MICROCHIP_VID 0x04D8 +#define MICROCHIP_USB_BOARD_PID 0x000A /* CDC RS-232 Emulation Demo */ + /* * RATOC REX-USB60F */ @@ -543,6 +584,13 @@ #define CONTEC_VID 0x06CE /* Vendor ID */ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ +/* + * Mitsubishi Electric Corp. 
(http://www.meau.com) + * Submitted by Konstantin Holoborodko + */ +#define MITSUBISHI_VID 0x06D3 +#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */ + /* * Definitions for B&B Electronics products. */ @@ -667,6 +715,10 @@ #define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */ #define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */ #define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */ +#define SEALEVEL_2803R_1_PID 0Xa02a /* SeaLINK+8 (2803-ROHS) Port 1+2 */ +#define SEALEVEL_2803R_2_PID 0Xa02b /* SeaLINK+8 (2803-ROHS) Port 3+4 */ +#define SEALEVEL_2803R_3_PID 0Xa02c /* SeaLINK+8 (2803-ROHS) Port 5+6 */ +#define SEALEVEL_2803R_4_PID 0Xa02d /* SeaLINK+8 (2803-ROHS) Port 7+8 */ /* * JETI SPECTROMETER SPECBOS 1201 @@ -715,6 +767,14 @@ #define TTI_VID 0x103E /* Vendor Id */ #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */ +/* + * Newport Cooperation (www.newport.com) + */ +#define NEWPORT_VID 0x104D +#define NEWPORT_AGILIS_PID 0x3000 +#define NEWPORT_CONEX_CC_PID 0x3002 +#define NEWPORT_CONEX_AGP_PID 0x3006 + /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ #define INTERBIOMETRICS_VID 0x1209 @@ -758,6 +818,41 @@ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ +#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ + + +/* + * Physik Instrumente + * http://www.physikinstrumente.com/en/products/ + */ +/* These two devices use the VID of FTDI */ +#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */ +#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */ + +#define PI_VID 0x1a72 /* Vendor ID */ +#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */ +#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */ +#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */ +#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */ +#define PI_C863_PID 0x1007 /* PI C-863 */ +#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */ +#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */ +#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */ +#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */ +#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */ +#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */ +#define PI_1012_PID 0x1012 /* PI Motion Controller */ +#define PI_1013_PID 0x1013 /* PI Motion Controller */ +#define PI_1014_PID 0x1014 /* PI Device */ +#define PI_1015_PID 0x1015 /* PI Device */ +#define PI_1016_PID 0x1016 /* PI Digital Servo Module */ + +/* + * Kondo Kagaku Co.Ltd. + * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 0x165c +#define KONDO_USB_SERIAL_PID 0x0002 /* * Bayer Ascensia Contour blood glucose meter USB-converter cable. @@ -1058,7 +1153,8 @@ * STMicroelectonics */ #define ST_VID 0x0483 -#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ +#define ST_STMCLT_2232_PID 0x3746 +#define ST_STMCLT_4232_PID 0x3747 /* * Papouch products (http://www.papouch.com/) @@ -1168,3 +1264,22 @@ */ /* TagTracer MIFARE*/ #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0 + +/* + * Rainforest Automation + */ +/* ZigBee controller */ +#define FTDI_RF_R106 0x8A28 + +/* + * Product: HCP HIT GPRS modem + * Manufacturer: HCP d.o.o. 
+ * ATI command output: Cinterion MC55i + */ +#define FTDI_CINTERION_MC55I_PID 0xA951 + +/* + * Product: Comet Caller ID decoder + * Manufacturer: Crucible Technologies + */ +#define FTDI_CT_COMET_PID 0x8e08 diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index b0a7a9e909a..51b22c3ff81 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -973,10 +973,7 @@ static void garmin_close(struct usb_serial_port *port) if (!serial) return; - mutex_lock(&port->serial->disc_mutex); - - if (!port->serial->disconnected) - garmin_clear(garmin_data_p); + garmin_clear(garmin_data_p); /* shutdown our urbs */ usb_kill_urb(port->read_urb); @@ -985,8 +982,6 @@ static void garmin_close(struct usb_serial_port *port) /* keep reset state so we know that we must start a new session */ if (garmin_data_p->state != STATE_RESET) garmin_data_p->state = STATE_DISCONNECTED; - - mutex_unlock(&port->serial->disc_mutex); } diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index e4db5ad2bc5..9f0b2bff8ee 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -215,8 +215,10 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port) clear_bit(i, &port->write_urbs_free); result = usb_submit_urb(urb, GFP_ATOMIC); if (result) { - dev_err(&port->dev, "%s - error submitting urb: %d\n", + if (!port->port.console) { + dev_err(&port->dev, "%s - error submitting urb: %d\n", __func__, result); + } set_bit(i, &port->write_urbs_free); spin_lock_irqsave(&port->lock, flags); port->tx_bytes -= count; diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 0aac00afb5c..1f145bfb7f4 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -558,6 +558,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout, wait_queue_t wait; unsigned long flags; + if (!tty) + return; + if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; @@ -2677,15 +2680,7 @@ static int edge_startup(struct usb_serial *serial) static void edge_disconnect(struct usb_serial *serial) { - int i; - struct edgeport_port *edge_port; - dbg("%s", __func__); - - for (i = 0; i < serial->num_ports; ++i) { - edge_port = usb_get_serial_port_data(serial->port[i]); - edge_remove_sysfs_attrs(edge_port->port); - } } static void edge_release(struct usb_serial *serial) @@ -2764,6 +2759,7 @@ static struct usb_serial_driver edgeport_1port_device = { .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, + .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, @@ -2795,10 +2791,12 @@ static struct usb_serial_driver edgeport_2port_device = { .disconnect = edge_disconnect, .release = edge_release, .port_probe = edge_create_sysfs_attrs, + .port_remove = edge_remove_sysfs_attrs, .ioctl = edge_ioctl, .set_termios = edge_set_termios, .tiocmget = edge_tiocmget, .tiocmset = edge_tiocmset, + .get_icount = edge_get_icount, .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index ba0d28727cc..d3addb2952e 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -359,13 +359,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); - if (rc < 0) - dev_err(&serial->dev->dev, - "Set MODEM CTRL 0x%x 
failed (error = %d)\n", mcr, rc); + kfree(buf); + dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); - kfree(buf); - return rc; + if (rc < 0) { + dev_err(&serial->dev->dev, + "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); + return rc; + } + return 0; } /* mct_u232_set_modem_ctrl */ static int mct_u232_get_modem_stat(struct usb_serial *serial, @@ -574,12 +577,14 @@ static void mct_u232_close(struct usb_serial_port *port) { dbg("%s port %d", __func__, port->number); - if (port->serial->dev) { - /* shutdown our urbs */ - usb_kill_urb(port->write_urb); - usb_kill_urb(port->read_urb); - usb_kill_urb(port->interrupt_in_urb); - } + /* + * Must kill the read urb as it is actually an interrupt urb, which + * generic close thus fails to kill. + */ + usb_kill_urb(port->read_urb); + usb_kill_urb(port->interrupt_in_urb); + + usb_serial_generic_close(port); } /* mct_u232_close */ diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 7b50aa12275..995e56c93bf 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -174,6 +174,7 @@ #define CLK_MULTI_REGISTER ((__u16)(0x02)) #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) +#define GPIO_REGISTER ((__u16)(0x07)) #define SERIAL_LCR_DLAB ((__u16)(0x0080)) @@ -205,7 +206,7 @@ static const struct usb_device_id moschip_port_id_table[] = { {} /* terminating entry */ }; -static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { +static const struct usb_device_id moschip_id_table_combined[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, @@ -234,12 +235,10 @@ struct moschip_port { int port_num; /*Actual port number in the device(1,2,etc) */ struct urb *write_urb; /* write URB for this port */ struct urb *read_urb; /* read URB for this port */ - struct urb *int_urb; __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ char open; char open_ports; - char zombie; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; @@ -504,7 +503,6 @@ static void mos7840_control_callback(struct urb *urb) unsigned char *data; struct moschip_port *mos7840_port; __u8 regval = 0x0; - int result = 0; int status = urb->status; mos7840_port = urb->context; @@ -523,7 +521,7 @@ static void mos7840_control_callback(struct urb *urb) default: dbg("%s - nonzero urb status received: %d", __func__, status); - goto exit; + return; } dbg("%s urb buffer size is %d", __func__, urb->actual_length); @@ -536,17 +534,6 @@ static void mos7840_control_callback(struct urb *urb) mos7840_handle_new_msr(mos7840_port, regval); else if (mos7840_port->MsrLsr == 1) mos7840_handle_new_lsr(mos7840_port, regval); - -exit: - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) - result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC); - spin_unlock(&mos7840_port->pool_lock); - if (result) { - dev_err(&urb->dev->dev, - "%s - Error %d submitting interrupt urb\n", - __func__, result); - } } static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, @@ -654,14 +641,7 @@ static void mos7840_interrupt_callback(struct urb *urb) wreg = MODEM_STATUS_REGISTER; break; } - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) { - rv = 
mos7840_get_reg(mos7840_port, wval, wreg, &Data); - } else { - spin_unlock(&mos7840_port->pool_lock); - return; - } - spin_unlock(&mos7840_port->pool_lock); + rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data); } } } @@ -1103,14 +1083,25 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) mos7840_port->read_urb = port->read_urb; /* set up our bulk in urb */ - - usb_fill_bulk_urb(mos7840_port->read_urb, - serial->dev, - usb_rcvbulkpipe(serial->dev, - port->bulk_in_endpointAddress), - port->bulk_in_buffer, - mos7840_port->read_urb->transfer_buffer_length, - mos7840_bulk_in_callback, mos7840_port); + if ((serial->num_ports == 2) + && ((((__u16)port->number - + (__u16)(port->serial->minor)) % 2) != 0)) { + usb_fill_bulk_urb(mos7840_port->read_urb, + serial->dev, + usb_rcvbulkpipe(serial->dev, + (port->bulk_in_endpointAddress) + 2), + port->bulk_in_buffer, + mos7840_port->read_urb->transfer_buffer_length, + mos7840_bulk_in_callback, mos7840_port); + } else { + usb_fill_bulk_urb(mos7840_port->read_urb, + serial->dev, + usb_rcvbulkpipe(serial->dev, + port->bulk_in_endpointAddress), + port->bulk_in_buffer, + mos7840_port->read_urb->transfer_buffer_length, + mos7840_bulk_in_callback, mos7840_port); + } dbg("mos7840_open: bulkin endpoint is %d", port->bulk_in_endpointAddress); @@ -1179,9 +1170,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty) } spin_lock_irqsave(&mos7840_port->pool_lock, flags); - for (i = 0; i < NUM_URBS; ++i) - if (mos7840_port->busy[i]) - chars += URB_TRANSFER_BUFFER_SIZE; + for (i = 0; i < NUM_URBS; ++i) { + if (mos7840_port->busy[i]) { + struct urb *urb = mos7840_port->write_urb_pool[i]; + chars += urb->transfer_buffer_length; + } + } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; @@ -1521,13 +1515,25 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, memcpy(urb->transfer_buffer, current_position, transfer_size); /* fill urb with data and submit */ - usb_fill_bulk_urb(urb, - serial->dev, - usb_sndbulkpipe(serial->dev, - port->bulk_out_endpointAddress), - urb->transfer_buffer, - transfer_size, - mos7840_bulk_out_data_callback, mos7840_port); + if ((serial->num_ports == 2) + && ((((__u16)port->number - + (__u16)(port->serial->minor)) % 2) != 0)) { + usb_fill_bulk_urb(urb, + serial->dev, + usb_sndbulkpipe(serial->dev, + (port->bulk_out_endpointAddress) + 2), + urb->transfer_buffer, + transfer_size, + mos7840_bulk_out_data_callback, mos7840_port); + } else { + usb_fill_bulk_urb(urb, + serial->dev, + usb_sndbulkpipe(serial->dev, + port->bulk_out_endpointAddress), + urb->transfer_buffer, + transfer_size, + mos7840_bulk_out_data_callback, mos7840_port); + } data1 = urb->transfer_buffer; dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); @@ -1840,7 +1846,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port, } else { #ifdef HW_flow_control - / *setting h/w flow control bit to 0 */ + /* setting h/w flow control bit to 0 */ Data = 0xb; mos7840_port->shadowMCR = Data; status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, @@ -2310,19 +2316,26 @@ static int mos7840_ioctl(struct tty_struct *tty, static int mos7840_calc_num_ports(struct usb_serial *serial) { - int mos7840_num_ports = 0; - - dbg("numberofendpoints: cur %d, alt %d", - (int)serial->interface->cur_altsetting->desc.bNumEndpoints, - (int)serial->interface->altsetting->desc.bNumEndpoints); - if 
(serial->interface->cur_altsetting->desc.bNumEndpoints == 5) { - mos7840_num_ports = serial->num_ports = 2; - } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) { + __u16 Data = 0x00; + int ret = 0; + int mos7840_num_ports; + + ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &Data, + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + + if ((Data & 0x01) == 0) { + mos7840_num_ports = 2; + serial->num_bulk_in = 2; + serial->num_bulk_out = 2; + serial->num_ports = 2; + } else { + mos7840_num_ports = 4; serial->num_bulk_in = 4; serial->num_bulk_out = 4; - mos7840_num_ports = serial->num_ports = 4; + serial->num_ports = 4; } - dbg ("mos7840_num_ports = %d", mos7840_num_ports); + return mos7840_num_ports; } @@ -2561,7 +2574,6 @@ static int mos7840_startup(struct usb_serial *serial) kfree(mos7840_port->ctrl_buf); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port); - serial->port[i] = NULL; } return status; } @@ -2574,7 +2586,6 @@ static int mos7840_startup(struct usb_serial *serial) static void mos7840_disconnect(struct usb_serial *serial) { int i; - unsigned long flags; struct moschip_port *mos7840_port; dbg("%s", " disconnect :entering.........."); @@ -2592,9 +2603,6 @@ static void mos7840_disconnect(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg ("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { - spin_lock_irqsave(&mos7840_port->pool_lock, flags); - mos7840_port->zombie = 1; - spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); usb_kill_urb(mos7840_port->control_urb); } } @@ -2628,6 +2636,7 @@ static void mos7840_release(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { + usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port->ctrl_buf); kfree(mos7840_port->dr); kfree(mos7840_port); diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 60f38d5e64f..0a8c1e64b24 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -315,7 +315,7 @@ static int omninet_write_room(struct tty_struct *tty) int room = 0; /* Default: no room */ /* FIXME: no consistent locking for write_urb_busy */ - if (wport->write_urb_busy) + if (!wport->write_urb_busy) room = wport->bulk_out_size - OMNINET_HEADERLEN; dbg("%s - returns %d", __func__, room); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 96423f3c8ef..5d274b35582 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -160,7 +160,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, { struct usb_serial *serial = port->serial; int retval; - u8 buffer[2]; + u8 *buffer; + + buffer = kzalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; buffer[0] = val; /* Send the message to the vendor control endpoint @@ -169,6 +173,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 0, 0, buffer, 1, 0); + kfree(buffer); return retval; } @@ -292,7 +297,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, if (!dr) { dev_err(&port->dev, "out of memory\n"); count = -ENOMEM; - goto error; + goto error_no_dr; } dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT; @@ -322,6 +327,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, return count; 
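	/*
	 * Editorial note: the labels below unwind in reverse order of
	 * allocation. This patch adds kfree(dr) to the common error path
	 * (previously the control request was leaked on late failures) and
	 * introduces error_no_dr so the allocation-failure path only frees
	 * what has actually been set up: the URB, then the transfer buffer.
	 */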
error: + kfree(dr); +error_no_dr: usb_free_urb(urb); error_no_urb: kfree(buffer); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d2becb9eb60..59c49976ca0 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -47,6 +47,7 @@ /* Function prototypes */ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id); +static void option_release(struct usb_serial *serial); static int option_send_setup(struct usb_serial_port *port); static void option_instat_callback(struct urb *urb); @@ -79,84 +80,10 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 -#define HUAWEI_PRODUCT_E600 0x1001 -#define HUAWEI_PRODUCT_E220 0x1003 -#define HUAWEI_PRODUCT_E220BIS 0x1004 -#define HUAWEI_PRODUCT_E1401 0x1401 -#define HUAWEI_PRODUCT_E1402 0x1402 -#define HUAWEI_PRODUCT_E1403 0x1403 -#define HUAWEI_PRODUCT_E1404 0x1404 -#define HUAWEI_PRODUCT_E1405 0x1405 -#define HUAWEI_PRODUCT_E1406 0x1406 -#define HUAWEI_PRODUCT_E1407 0x1407 -#define HUAWEI_PRODUCT_E1408 0x1408 -#define HUAWEI_PRODUCT_E1409 0x1409 -#define HUAWEI_PRODUCT_E140A 0x140A -#define HUAWEI_PRODUCT_E140B 0x140B -#define HUAWEI_PRODUCT_E140C 0x140C -#define HUAWEI_PRODUCT_E140D 0x140D -#define HUAWEI_PRODUCT_E140E 0x140E -#define HUAWEI_PRODUCT_E140F 0x140F -#define HUAWEI_PRODUCT_E1410 0x1410 -#define HUAWEI_PRODUCT_E1411 0x1411 -#define HUAWEI_PRODUCT_E1412 0x1412 -#define HUAWEI_PRODUCT_E1413 0x1413 -#define HUAWEI_PRODUCT_E1414 0x1414 -#define HUAWEI_PRODUCT_E1415 0x1415 -#define HUAWEI_PRODUCT_E1416 0x1416 -#define HUAWEI_PRODUCT_E1417 0x1417 -#define HUAWEI_PRODUCT_E1418 0x1418 -#define HUAWEI_PRODUCT_E1419 0x1419 -#define HUAWEI_PRODUCT_E141A 0x141A -#define HUAWEI_PRODUCT_E141B 0x141B -#define HUAWEI_PRODUCT_E141C 0x141C -#define HUAWEI_PRODUCT_E141D 0x141D -#define HUAWEI_PRODUCT_E141E 0x141E -#define HUAWEI_PRODUCT_E141F 0x141F -#define HUAWEI_PRODUCT_E1420 0x1420 -#define HUAWEI_PRODUCT_E1421 0x1421 -#define HUAWEI_PRODUCT_E1422 0x1422 -#define HUAWEI_PRODUCT_E1423 0x1423 -#define HUAWEI_PRODUCT_E1424 0x1424 -#define HUAWEI_PRODUCT_E1425 0x1425 -#define HUAWEI_PRODUCT_E1426 0x1426 -#define HUAWEI_PRODUCT_E1427 0x1427 -#define HUAWEI_PRODUCT_E1428 0x1428 -#define HUAWEI_PRODUCT_E1429 0x1429 -#define HUAWEI_PRODUCT_E142A 0x142A -#define HUAWEI_PRODUCT_E142B 0x142B -#define HUAWEI_PRODUCT_E142C 0x142C -#define HUAWEI_PRODUCT_E142D 0x142D -#define HUAWEI_PRODUCT_E142E 0x142E -#define HUAWEI_PRODUCT_E142F 0x142F -#define HUAWEI_PRODUCT_E1430 0x1430 -#define HUAWEI_PRODUCT_E1431 0x1431 -#define HUAWEI_PRODUCT_E1432 0x1432 -#define HUAWEI_PRODUCT_E1433 0x1433 -#define HUAWEI_PRODUCT_E1434 0x1434 -#define HUAWEI_PRODUCT_E1435 0x1435 -#define HUAWEI_PRODUCT_E1436 0x1436 -#define HUAWEI_PRODUCT_E1437 0x1437 -#define HUAWEI_PRODUCT_E1438 0x1438 -#define HUAWEI_PRODUCT_E1439 0x1439 -#define HUAWEI_PRODUCT_E143A 0x143A -#define HUAWEI_PRODUCT_E143B 0x143B -#define HUAWEI_PRODUCT_E143C 0x143C -#define HUAWEI_PRODUCT_E143D 0x143D -#define HUAWEI_PRODUCT_E143E 0x143E -#define HUAWEI_PRODUCT_E143F 0x143F +#define HUAWEI_PRODUCT_E173 0x140C #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 -#define HUAWEI_PRODUCT_E14AC 0x14AC -#define HUAWEI_PRODUCT_K3806 0x14AE #define HUAWEI_PRODUCT_K4605 0x14C6 -#define HUAWEI_PRODUCT_K3770 0x14C9 -#define HUAWEI_PRODUCT_K3771 0x14CA -#define HUAWEI_PRODUCT_K4510 0x14CB -#define HUAWEI_PRODUCT_K4511 0x14CC -#define HUAWEI_PRODUCT_ETS1220 0x1803 
-#define HUAWEI_PRODUCT_E353 0x1506 -#define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -231,9 +158,11 @@ static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 +#define NOVATELWIRELESS_PRODUCT_E362 0x9010 #define NOVATELWIRELESS_PRODUCT_G1 0xA001 #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 +#define NOVATELWIRELESS_PRODUCT_MC551 0xB001 /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 @@ -265,6 +194,10 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 +#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ +#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ + #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da #define KYOCERA_PRODUCT_KPC680 0x180a @@ -307,6 +240,10 @@ static void option_instat_callback(struct urb *urb); #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 #define TELIT_PRODUCT_UC864G 0x1004 +#define TELIT_PRODUCT_CC864_DUAL 0x1005 +#define TELIT_PRODUCT_CC864_SINGLE 0x1006 +#define TELIT_PRODUCT_DE910_DUAL 0x1010 +#define TELIT_PRODUCT_LE920 0x1200 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -352,6 +289,8 @@ static void option_instat_callback(struct urb *urb); /* ALCATEL PRODUCTS */ #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S_X200 0x0000 +#define ALCATEL_PRODUCT_X220_X500D 0x0017 +#define ALCATEL_PRODUCT_L100V 0x011e #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -403,10 +342,13 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 +#define CINTERION_PRODUCT_AHXX 0x0055 +#define CINTERION_PRODUCT_PLXX 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD145 0xc003 /* Celot products */ #define CELOT_VENDOR_ID 0x211f @@ -422,7 +364,7 @@ static void option_instat_callback(struct urb *urb); #define SAMSUNG_VENDOR_ID 0x04e8 #define SAMSUNG_PRODUCT_GT_B3730 0x6889 -/* YUGA products www.yuga-info.com*/ +/* YUGA products www.yuga-info.com gavin.kx@qq.com */ #define YUGA_VENDOR_ID 0x257A #define YUGA_PRODUCT_CEM600 0x1601 #define YUGA_PRODUCT_CEM610 0x1602 @@ -439,6 +381,8 @@ static void option_instat_callback(struct urb *urb); #define YUGA_PRODUCT_CEU516 0x160C #define YUGA_PRODUCT_CEU528 0x160D #define YUGA_PRODUCT_CEU526 0x160F +#define YUGA_PRODUCT_CEU881 0x161F +#define YUGA_PRODUCT_CEU882 0x162F #define YUGA_PRODUCT_CWM600 0x2601 #define YUGA_PRODUCT_CWM610 0x2602 @@ -454,23 +398,69 @@ static void option_instat_callback(struct urb *urb); #define YUGA_PRODUCT_CWU518 0x260B #define YUGA_PRODUCT_CWU516 0x260C #define YUGA_PRODUCT_CWU528 0x260D +#define YUGA_PRODUCT_CWU581 0x260E #define YUGA_PRODUCT_CWU526 0x260F - -#define YUGA_PRODUCT_CLM600 0x2601 -#define YUGA_PRODUCT_CLM610 0x2602 -#define YUGA_PRODUCT_CLM500 0x2603 -#define YUGA_PRODUCT_CLM510 0x2604 -#define YUGA_PRODUCT_CLM800 0x2605 -#define YUGA_PRODUCT_CLM900 0x2606 - -#define YUGA_PRODUCT_CLU718 0x2607 -#define YUGA_PRODUCT_CLU716 0x2608 -#define 
YUGA_PRODUCT_CLU728 0x2609 -#define YUGA_PRODUCT_CLU726 0x260A -#define YUGA_PRODUCT_CLU518 0x260B -#define YUGA_PRODUCT_CLU516 0x260C -#define YUGA_PRODUCT_CLU528 0x260D -#define YUGA_PRODUCT_CLU526 0x260F +#define YUGA_PRODUCT_CWU582 0x261F +#define YUGA_PRODUCT_CWU583 0x262F + +#define YUGA_PRODUCT_CLM600 0x3601 +#define YUGA_PRODUCT_CLM610 0x3602 +#define YUGA_PRODUCT_CLM500 0x3603 +#define YUGA_PRODUCT_CLM510 0x3604 +#define YUGA_PRODUCT_CLM800 0x3605 +#define YUGA_PRODUCT_CLM900 0x3606 + +#define YUGA_PRODUCT_CLU718 0x3607 +#define YUGA_PRODUCT_CLU716 0x3608 +#define YUGA_PRODUCT_CLU728 0x3609 +#define YUGA_PRODUCT_CLU726 0x360A +#define YUGA_PRODUCT_CLU518 0x360B +#define YUGA_PRODUCT_CLU516 0x360C +#define YUGA_PRODUCT_CLU528 0x360D +#define YUGA_PRODUCT_CLU526 0x360F + +/* Viettel products */ +#define VIETTEL_VENDOR_ID 0x2262 +#define VIETTEL_PRODUCT_VT1000 0x0002 + +/* ZD Incorporated */ +#define ZD_VENDOR_ID 0x0685 +#define ZD_PRODUCT_7000 0x7000 + +/* LG products */ +#define LG_VENDOR_ID 0x1004 +#define LG_PRODUCT_L02C 0x618f + +/* MediaTek products */ +#define MEDIATEK_VENDOR_ID 0x0e8d +#define MEDIATEK_PRODUCT_DC_1COM 0x00a0 +#define MEDIATEK_PRODUCT_DC_4COM 0x00a5 +#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7 +#define MEDIATEK_PRODUCT_DC_5COM 0x00a4 +#define MEDIATEK_PRODUCT_7208_1COM 0x7101 +#define MEDIATEK_PRODUCT_7208_2COM 0x7102 +#define MEDIATEK_PRODUCT_7103_2COM 0x7103 +#define MEDIATEK_PRODUCT_7106_2COM 0x7106 +#define MEDIATEK_PRODUCT_FP_1COM 0x0003 +#define MEDIATEK_PRODUCT_FP_2COM 0x0023 +#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043 +#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033 + +/* Cellient products */ +#define CELLIENT_VENDOR_ID 0x2692 +#define CELLIENT_PRODUCT_MEN200 0x9005 + +/* Hyundai Petatel Inc. products */ +#define PETATEL_VENDOR_ID 0x1ff4 +#define PETATEL_PRODUCT_NP10T 0x600e + +/* TP-LINK Incorporated products */ +#define TPLINK_VENDOR_ID 0x2357 +#define TPLINK_PRODUCT_MA180 0x0201 + +/* Changhong products */ +#define CHANGHONG_VENDOR_ID 0x2077 +#define CHANGHONG_PRODUCT_CH690 0x7001 /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { @@ -493,6 +483,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = { static const struct option_blacklist_info alcatel_x200_blacklist = { .sendsetup = BIT(0) | BIT(1), + .reserved = BIT(4), }; static const struct option_blacklist_info zte_0037_blacklist = { @@ -524,6 +515,10 @@ static const struct option_blacklist_info net_intf1_blacklist = { .reserved = BIT(1), }; +static const struct option_blacklist_info net_intf2_blacklist = { + .reserved = BIT(2), +}; + static const struct option_blacklist_info net_intf3_blacklist = { .reserved = BIT(3), }; @@ -536,11 +531,24 @@ static const struct option_blacklist_info net_intf5_blacklist = { .reserved = BIT(5), }; +static const struct option_blacklist_info net_intf6_blacklist = { + .reserved = BIT(6), +}; + static const struct option_blacklist_info zte_mf626_blacklist = { .sendsetup = BIT(0) | BIT(1), .reserved = BIT(4), }; +static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), +}; + +static const struct option_blacklist_info telit_le920_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(5), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -572,99 +580,132 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, 
QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, - { 
USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, + { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, 
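The Huawei rework above and below drops one-entry-per-product matching in favour of class-based matching: a few USB_DEVICE_AND_INTERFACE_INFO() entries pin specific products to their CDC control interface, and the USB_VENDOR_AND_INTERFACE_INFO() wildcards that follow claim any Huawei product whose interface reports the vendor-specific class triple. A minimal sketch of that pattern, using a placeholder vendor ID, product ID and table name rather than anything from the driver itself:

/*
 * Sketch only: match by vendor + interface class/subclass/protocol
 * instead of listing every product ID.  The vendor ID, product ID and
 * table name are illustrative placeholders.
 */
#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* placeholder VID */

static const struct usb_device_id example_ids[] = {
	/* one specific product, but only its CDC control interface */
	{ USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID, 0x1c05,
					USB_CLASS_COMM, 0x02, 0xff) },
	/* any product from this vendor whose interface is vendor
	 * specific (0xff) with subclass 0x01, protocol 0x01 */
	{ USB_VENDOR_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID,
					0xff, 0x01, 0x01) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);

The wildcard form can also claim interfaces meant for other drivers, which is why many of the surrounding entries still hang a net_intfN_blacklist off .driver_info so that option_probe() can return -ENODEV for the network interface.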
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */ + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) }, + { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
0xff, 0x02, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, + + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { 
USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -690,6 +731,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) }, @@ -703,6 +745,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, + /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -725,6 +770,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -756,6 +804,11 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -776,7 +829,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), @@ -791,7 +843,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, @@ -816,7 +867,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -824,7 +874,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff), @@ -834,7 +883,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, @@ -843,6 +891,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 
0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, @@ -851,38 +909,78 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, - { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -998,18 +1096,24 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_1255_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, @@ -1054,17 +1158,47 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 
0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, - 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, @@ -1075,6 +1209,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, + { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ @@ -1092,6 +1230,16 @@ static const struct usb_device_id option_ids[] = { { 
USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), @@ -1120,6 +1268,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -1128,6 +1279,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ @@ -1173,6 +1325,44 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) }, + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) }, + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) }, + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) }, + { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) }, + { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, + { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */ + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 
MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -1216,7 +1406,7 @@ static struct usb_serial_driver option_1port_device = { .ioctl = usb_wwan_ioctl, .attach = usb_wwan_startup, .disconnect = usb_wwan_disconnect, - .release = usb_wwan_release, + .release = option_release, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, @@ -1226,35 +1416,6 @@ static struct usb_serial_driver option_1port_device = { static int debug; -/* per port private data */ - -#define N_IN_URB 4 -#define N_OUT_URB 4 -#define IN_BUFLEN 4096 -#define OUT_BUFLEN 4096 - -struct option_port_private { - /* Input endpoints and buffer for this port */ - struct urb *in_urbs[N_IN_URB]; - u8 *in_buffer[N_IN_URB]; - /* Output endpoints and buffer for this port */ - struct urb *out_urbs[N_OUT_URB]; - u8 *out_buffer[N_OUT_URB]; - unsigned long out_busy; /* Bit vector of URBs in use */ - int opened; - struct usb_anchor delayed; - - /* Settings for the port */ - int rts_state; /* Handshaking pins (outputs) */ - int dtr_state; - int cts_state; /* Handshaking pins (inputs) */ - int dsr_state; - int dcd_state; - int ri_state; - - unsigned long tx_start_time[N_OUT_URB]; -}; - /* Functions used by new usb-serial code. 
*/ static int __init option_init(void) { @@ -1335,6 +1496,7 @@ static int option_probe(struct usb_serial *serial, serial->interface->cur_altsetting->desc.bInterfaceNumber, OPTION_BLACKLIST_RESERVED_IF, (const struct option_blacklist_info *) id->driver_info)) + return -ENODEV; /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID && @@ -1351,12 +1513,22 @@ static int option_probe(struct usb_serial *serial, return 0; } +static void option_release(struct usb_serial *serial) +{ + struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); + + usb_wwan_release(serial); + + kfree(priv); +} + static void option_instat_callback(struct urb *urb) { int err; int status = urb->status; struct usb_serial_port *port = urb->context; - struct option_port_private *portdata = usb_get_serial_port_data(port); + struct usb_wwan_port_private *portdata = + usb_get_serial_port_data(port); dbg("%s", __func__); dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata); @@ -1417,7 +1589,7 @@ static int option_send_setup(struct usb_serial_port *port) struct usb_serial *serial = port->serial; struct usb_wwan_intf_private *intfdata = (struct usb_wwan_intf_private *) serial->private; - struct option_port_private *portdata; + struct usb_wwan_port_private *portdata; int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; int val = 0; dbg("%s", __func__); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index d44c669cc48..5aa7172e049 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -424,7 +424,7 @@ static void pl2303_set_termios(struct tty_struct *tty, control = priv->line_control; if ((cflag & CBAUD) == B0) priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS); - else + else if ((old_termios->c_cflag & CBAUD) == B0) priv->line_control |= (CONTROL_DTR | CONTROL_RTS); if (control != priv->line_control) { control = priv->line_control; diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c index 30b73e68a90..153d71988b4 100644 --- a/drivers/usb/serial/qcaux.c +++ b/drivers/usb/serial/qcaux.c @@ -36,7 +36,6 @@ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712 #define UTSTARCOM_PRODUCT_UM175_V2 0x3714 #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715 -#define PANTECH_PRODUCT_UML290_VZW 0x3718 /* CMOTECH devices */ #define CMOTECH_VENDOR_ID 0x16d8 @@ -67,7 +66,10 @@ static struct usb_device_id id_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) }, - { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */ + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */ + { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index b9bb24729c9..03d5f932d66 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -23,34 +23,45 @@ static int debug; +#define DEVICE_G1K(v, p) \ + USB_DEVICE(v, p), .driver_info = 1 + static const struct 
usb_device_id id_table[] = { - {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ - {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ - {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ - {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ - {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ - {USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ - {USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ - {USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ - {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */ - {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ - {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ - {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ - {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ - {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ - {USB_DEVICE(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ - {USB_DEVICE(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ - {USB_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ - {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ - {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ - {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ - {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ - {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + /* Gobi 1000 devices */ + {DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ + {DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ + {DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ + {DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ + {DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ + {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ + {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */ + {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */ + {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ + {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ + {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ + {DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ + {DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ + {DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ + {DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ + {DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ + {DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ + {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ + {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ + {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ + + /* Gobi 2000 devices */ + {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ + {USB_DEVICE(0x1410, 0xa011)}, /* 
Novatel Gobi 2000 QDL device */ + {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */ + {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */ + {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */ @@ -85,7 +96,24 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ + + /* Gobi 3000 devices */ + {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */ + {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */ + {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ + {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */ + {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ + {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ + {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ + {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */ + {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */ {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ + {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */ + {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ + {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */ + {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ + {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); @@ -107,8 +135,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) int retval = -ENODEV; __u8 nintf; __u8 ifnum; + bool is_gobi1k = id->driver_info ? true : false; dbg("%s", __func__); + dbg("Is Gobi 1000 = %d", is_gobi1k); nintf = serial->dev->actconfig->desc.bNumInterfaces; dbg("Num Interfaces = %d", nintf); @@ -156,15 +186,25 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) case 3: case 4: - /* Composite mode */ - /* ifnum == 0 is a broadband network adapter */ - if (ifnum == 1) { - /* - * Diagnostics Monitor (serial line 9600 8N1) - * Qualcomm DM protocol - * use "libqcdm" (ModemManager) for communication - */ - dbg("Diagnostics Monitor found"); + /* Composite mode; don't bind to the QMI/net interface as that + * gets handled by other drivers. 
+ */ + + /* Gobi 1K USB layout: + * 0: serial port (doesn't respond) + * 1: serial port (doesn't respond) + * 2: AT-capable modem port + * 3: QMI/net + * + * Gobi 2K+ USB layout: + * 0: QMI/net + * 1: DM/DIAG (use libqcdm from ModemManager for communication) + * 2: AT-capable modem port + * 3: NMEA + */ + + if (ifnum == 1 && !is_gobi1k) { + dbg("Gobi 2K+ DM/DIAG interface found"); retval = usb_set_interface(serial->dev, ifnum, 0); if (retval < 0) { dev_err(&serial->dev->dev, @@ -183,13 +223,13 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) retval = -ENODEV; kfree(data); } - } else if (ifnum==3) { + } else if (ifnum==3 && !is_gobi1k) { /* * NMEA (serial line 9600 8N1) * # echo "\$GPS_START" > /dev/ttyUSBx * # echo "\$GPS_STOP" > /dev/ttyUSBx */ - dbg("NMEA GPS interface found"); + dbg("Gobi 2K+ NMEA GPS interface found"); retval = usb_set_interface(serial->dev, ifnum, 0); if (retval < 0) { dev_err(&serial->dev->dev, diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d5d136a53b6..a159ad0c5e1 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -171,7 +171,6 @@ static int sierra_probe(struct usb_serial *serial, { int result = 0; struct usb_device *udev; - struct sierra_intf_private *data; u8 ifnum; udev = serial->dev; @@ -199,11 +198,6 @@ static int sierra_probe(struct usb_serial *serial, return -ENODEV; } - data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL); - if (!data) - return -ENOMEM; - spin_lock_init(&data->susp_lock); - return result; } @@ -221,7 +215,7 @@ static const struct sierra_iface_info typeB_interface_list = { }; /* 'blacklist' of interfaces not served by this driver */ -static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; +static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 }; static const struct sierra_iface_info direct_ip_interface_blacklist = { .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), .ifaceinfo = direct_ip_non_serial_ifaces, @@ -298,9 +292,16 @@ static const struct usb_device_id id_table[] = { /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ + { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, + /* AT&T Direct IP LTE modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, @@ -908,6 +909,7 @@ static void sierra_dtr_rts(struct usb_serial_port *port, int on) static int sierra_startup(struct usb_serial *serial) { struct usb_serial_port *port; + struct sierra_intf_private *intfdata; struct sierra_port_private *portdata; struct sierra_iface_info *himemoryp = NULL; int i; @@ -915,6 +917,14 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&serial->dev->dev, "%s\n", __func__); + intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL); + if (!intfdata) + return -ENOMEM; + + spin_lock_init(&intfdata->susp_lock); + + usb_set_serial_data(serial, intfdata); + /* Set Device mode to D0 */ sierra_set_power_state(serial->dev, 0x0000); @@ -930,7 
+940,7 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&port->dev, "%s: kmalloc for " "sierra_port_private (%d) failed!\n", __func__, i); - return -ENOMEM; + goto err; } spin_lock_init(&portdata->lock); init_usb_anchor(&portdata->active); @@ -967,6 +977,14 @@ static int sierra_startup(struct usb_serial *serial) } return 0; +err: + for (--i; i >= 0; --i) { + portdata = usb_get_serial_port_data(serial->port[i]); + kfree(portdata); + } + kfree(intfdata); + + return -ENOMEM; } static void sierra_release(struct usb_serial *serial) @@ -986,6 +1004,7 @@ static void sierra_release(struct usb_serial *serial) continue; kfree(portdata); } + kfree(serial->private); } #ifdef CONFIG_PM diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index ea8445689c8..2856474123e 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -165,7 +165,7 @@ static unsigned int product_5052_count; /* the array dimension is the number of default entries plus */ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ /* null entry */ -static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { +static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, @@ -179,6 +179,8 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { @@ -188,7 +190,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, }; -static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { +static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, @@ -206,6 +208,8 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, { } }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index 2aac1953993..b353e7e3d48 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -37,6 +37,7 @@ #define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */ #define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */ #define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */ +#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */ /* Multi-Tech vendor and product ids */ #define MTS_VENDOR_ID 0x06E0 @@ -49,6 +50,10 @@ #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 +/* Abbott Diabetics vendor and product ids */ +#define ABBOTT_VENDOR_ID 0x1a61 +#define ABBOTT_PRODUCT_ID 0x3410 + /* Commands */ #define TI_GET_VERSION 0x01 
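Stepping back to the sierra_startup() change just above: once the interface-private data is allocated inside startup and the per-port loop can fail partway through, every allocation made so far has to be unwound before returning, otherwise the earlier kzalloc() results leak. A minimal sketch of that allocate-then-unwind shape, with invented names:

/*
 * Sketch only: free whatever earlier loop iterations allocated before
 * bailing out with -ENOMEM.  All names here are invented.
 */
#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_NUM_PORTS	4

static void *demo_ports[DEMO_NUM_PORTS];

static int demo_startup(void)
{
	int i;

	for (i = 0; i < DEMO_NUM_PORTS; i++) {
		demo_ports[i] = kzalloc(64, GFP_KERNEL);
		if (!demo_ports[i])
			goto err;
	}
	return 0;

err:
	for (--i; i >= 0; --i) {
		kfree(demo_ports[i]);
		demo_ports[i] = NULL;
	}
	return -ENOMEM;
}

The matching kfree(serial->private) added to sierra_release() mirrors the allocation's move from sierra_probe() into sierra_startup().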
#define TI_GET_PORT_STATUS 0x02 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 1c031309ab2..f6785327477 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -168,6 +168,7 @@ static void destroy_serial(struct kref *kref) } } + usb_put_intf(serial->interface); usb_put_dev(serial->dev); kfree(serial); } @@ -624,7 +625,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, } serial->dev = usb_get_dev(dev); serial->type = driver; - serial->interface = interface; + serial->interface = usb_get_intf(interface); kref_init(&serial->kref); mutex_init(&serial->disc_mutex); serial->minor = SERIAL_TTY_NO_MINOR; @@ -669,12 +670,14 @@ static const struct usb_device_id *get_iface_id(struct usb_serial_driver *drv, static struct usb_serial_driver *search_serial_device( struct usb_interface *iface) { - const struct usb_device_id *id; + const struct usb_device_id *id = NULL; struct usb_serial_driver *drv; + struct usb_driver *driver = to_usb_driver(iface->dev.driver); /* Check if the usb id matches a known device */ list_for_each_entry(drv, &usb_serial_driver_list, driver_list) { - id = get_iface_id(drv, iface); + if (drv->usb_driver == driver) + id = get_iface_id(drv, iface); if (id) return drv; } @@ -762,7 +765,7 @@ int usb_serial_probe(struct usb_interface *interface, if (retval) { dbg("sub driver rejected device"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return retval; } @@ -834,7 +837,7 @@ int usb_serial_probe(struct usb_interface *interface, */ if (num_bulk_in == 0 || num_bulk_out == 0) { dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return -ENODEV; } @@ -848,7 +851,7 @@ int usb_serial_probe(struct usb_interface *interface, if (num_ports == 0) { dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n"); - kfree(serial); + usb_serial_put(serial); module_put(type->driver.owner); return -EIO; } @@ -1059,6 +1062,12 @@ int usb_serial_probe(struct usb_interface *interface, serial->attached = 1; } + /* Avoid race with tty_open and serial_install by setting the + * disconnected flag and not clearing it until all ports have been + * registered. + */ + serial->disconnected = 1; + if (get_free_serial(serial, num_ports, &minor) == NULL) { dev_err(&interface->dev, "No more free serial devices\n"); goto probe_error; @@ -1083,6 +1092,8 @@ int usb_serial_probe(struct usb_interface *interface, } } + serial->disconnected = 0; + usb_serial_console_init(debug, minor); exit: diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 5b073bcc807..59d646d207b 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -576,6 +576,7 @@ static int whiteheat_attach(struct usb_serial *serial) "%s: please contact support@connecttech.com\n", serial->type->description); kfree(result); + kfree(command); return -ENODEV; no_command_private: diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index 97987255be7..dc27260068b 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig @@ -199,7 +199,7 @@ config USB_STORAGE_ENE_UB6250 config USB_UAS tristate "USB Attached SCSI" - depends on USB && SCSI + depends on USB && SCSI && BROKEN help The USB Attached SCSI protocol is supported by some USB storage devices. 
It permits higher performance by supporting diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c index c8447182118..7341ce22d0f 100644 --- a/drivers/usb/storage/cypress_atacb.c +++ b/drivers/usb/storage/cypress_atacb.c @@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf, { struct us_data *us; int result; + struct usb_device *device; result = usb_stor_probe1(&us, intf, id, (id - cypress_usb_ids) + cypress_unusual_dev_list); if (result) return result; - us->protocol_name = "Transparent SCSI with Cypress ATACB"; - us->proto_handler = cypress_atacb_passthrough; + /* Among CY7C68300 chips, the A revision does not support Cypress ATACB + * Filter out this revision from EEPROM default descriptor values + */ + device = interface_to_usbdev(intf); + if (device->descriptor.iManufacturer != 0x38 || + device->descriptor.iProduct != 0x4e || + device->descriptor.iSerialNumber != 0x64) { + us->protocol_name = "Transparent SCSI with Cypress ATACB"; + us->proto_handler = cypress_atacb_passthrough; + } else { + us->protocol_name = "Transparent SCSI"; + us->proto_handler = usb_stor_transparent_scsi_command; + } result = usb_stor_probe2(us); return result; diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 2c855302622..65a6a75066a 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, "Super Top", "USB 2.0 SATA BRIDGE", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 24caba79d72..7b8d564c282 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), +/* Added by Dmitry Artamonow */ +UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, + "Samsung", + "YP-Z3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* Entry and supporting patch by Theodore Kilgore . * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. 
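The usb-serial.c hunks above pin the USB interface with usb_get_intf() in create_serial() and release it with usb_put_intf() in destroy_serial(); the error paths switch from a bare kfree(serial) to usb_serial_put() so that this reference is always dropped through the destructor instead of being leaked. Below is a minimal userspace sketch of that take-a-reference-at-creation, drop-it-at-destruction shape; the intf/serial types, intf_get()/intf_put() and the plain integer refcount are illustrative stand-ins, not code from the patch (the kernel's count is managed atomically by the USB core).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a refcounted USB interface. */
struct intf {
	int refcount;
	const char *name;
};

static struct intf *intf_get(struct intf *i)
{
	i->refcount++;
	return i;
}

static void intf_put(struct intf *i)
{
	if (--i->refcount == 0) {
		printf("freeing %s\n", i->name);
		free(i);
	}
}

struct serial {
	struct intf *interface;		/* holds its own reference */
};

static struct serial *create_serial(struct intf *i)
{
	struct serial *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->interface = intf_get(i);	/* pin the interface for our lifetime */
	return s;
}

static void destroy_serial(struct serial *s)
{
	intf_put(s->interface);		/* drop our reference, may free it */
	free(s);
}

int main(void)
{
	struct intf *i = malloc(sizeof(*i));
	struct serial *s;

	if (!i)
		return 1;
	i->refcount = 1;		/* creator's reference */
	i->name = "intf0";

	s = create_serial(i);
	intf_put(i);			/* creator is done; object lives on */
	if (s)
		destroy_serial(s);	/* last reference dropped here */
	return 0;
}

The point of the pattern is simply that every owner that stores the pointer also holds a reference, so freeing happens only when the last owner lets go.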
@@ -1004,6 +1011,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999, USB_SC_8070, USB_PR_CB, NULL, US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ), +/* Submitted by Oleksandr Chumachenko */ +UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100, + "Casio", + "EX-N1 DigitalCamera", + USB_SC_8070, USB_PR_DEVICE, NULL, 0), + /* Submitted by Hartmut Wahl */ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001, "Samsung", @@ -1885,6 +1898,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Jesse Feddema */ +UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000, + "Yarvik", + "PMP400", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + /* Reported by Hans de Goede * These Appotech controllers are found in Picture Frames, they provide a * (buggy) emulation of a cdrom drive which contains the windows software diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index c325e69415a..d582af4a196 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); .useTransport = use_transport, \ } +#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \ + vendor_name, product_name, use_protocol, use_transport, \ + init_function, Flags) \ +{ \ + .vendorName = vendor_name, \ + .productName = product_name, \ + .useProtocol = use_protocol, \ + .useTransport = use_transport, \ + .initFunction = init_function, \ +} + static struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -128,6 +139,7 @@ static struct us_unusual_dev us_unusual_dev_list[] = { #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF #ifdef CONFIG_PM /* Minimal support for suspend and resume */ @@ -788,15 +800,19 @@ static void quiesce_and_remove_host(struct us_data *us) struct Scsi_Host *host = us_to_host(us); /* If the device is really gone, cut short reset delays */ - if (us->pusb_dev->state == USB_STATE_NOTATTACHED) + if (us->pusb_dev->state == USB_STATE_NOTATTACHED) { set_bit(US_FLIDX_DISCONNECTING, &us->dflags); + wake_up(&us->delay_wait); + } - /* Prevent SCSI-scanning (if it hasn't started yet) - * and wait for the SCSI-scanning thread to stop. + /* Prevent SCSI scanning (if it hasn't started yet) + * or wait for the SCSI-scanning routine to stop. */ - set_bit(US_FLIDX_DONT_SCAN, &us->dflags); - wake_up(&us->delay_wait); - wait_for_completion(&us->scanning_done); + cancel_delayed_work_sync(&us->scan_dwork); + + /* Balance autopm calls if scanning was cancelled */ + if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags)) + usb_autopm_put_interface_no_suspend(us->pusb_intf); /* Removing the host will perform an orderly shutdown: caches * synchronized, disks spun down, etc. 
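The quiesce_and_remove_host() hunk above, together with the usb_stor_scan_dwork conversion just below, replaces the usb-stor-scan kthread with a delayed work item: probe queues the scan on system_freezable_wq, and disconnect cancels it synchronously instead of juggling the DONT_SCAN flag, the delay waitqueue and a completion. This is a minimal sketch of that queue-on-probe / cancel-on-remove shape only; my_dev, my_scan_dwork, my_probe and my_remove are made-up names standing in for us_data and the storage probe paths, and it is written as a toy module rather than a drop-in for the driver.

/* Sketch: a delayed "scan" queued at probe time, cancelled at remove time. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work scan_dwork;
};

static struct my_dev *example;

static void my_scan_dwork(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  scan_dwork.work);

	/* The real driver determines max_lun and calls scsi_scan_host() here. */
	pr_info("scanning %p\n", dev);
}

static int __init my_probe(void)
{
	example = kzalloc(sizeof(*example), GFP_KERNEL);
	if (!example)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example->scan_dwork, my_scan_dwork);
	/* Freezable workqueue, so a pending delayed scan cannot block suspend. */
	queue_delayed_work(system_freezable_wq, &example->scan_dwork, 5 * HZ);
	return 0;
}

static void __exit my_remove(void)
{
	/* Either waits for the handler to finish or stops it from running at
	 * all; no flags or completions needed, unlike the old kthread scheme. */
	cancel_delayed_work_sync(&example->scan_dwork);
	kfree(example);
}

module_init(my_probe);
module_exit(my_remove);
MODULE_LICENSE("GPL");

The design win is that cancel_delayed_work_sync() gives the teardown path a single call with well-defined semantics, which is why the patch can drop US_FLIDX_DONT_SCAN and the scanning_done completion entirely.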
@@ -823,52 +839,28 @@ static void release_everything(struct us_data *us) scsi_host_put(us_to_host(us)); } -/* Thread to carry out delayed SCSI-device scanning */ -static int usb_stor_scan_thread(void * __us) +/* Delayed-work routine to carry out SCSI-device scanning */ +static void usb_stor_scan_dwork(struct work_struct *work) { - struct us_data *us = (struct us_data *)__us; + struct us_data *us = container_of(work, struct us_data, + scan_dwork.work); struct device *dev = &us->pusb_intf->dev; - dev_dbg(dev, "device found\n"); + dev_dbg(dev, "starting scan\n"); - set_freezable_with_signal(); - /* - * Wait for the timeout to expire or for a disconnect - * - * We can't freeze in this thread or we risk causing khubd to - * fail to freeze, but we can't be non-freezable either. Nor can - * khubd freeze while waiting for scanning to complete as it may - * hold the device lock, causing a hang when suspending devices. - * So we request a fake signal when freezing and use - * interruptible sleep to kick us out of our wait early when - * freezing happens. - */ - if (delay_use > 0) { - dev_dbg(dev, "waiting for device to settle " - "before scanning\n"); - wait_event_interruptible_timeout(us->delay_wait, - test_bit(US_FLIDX_DONT_SCAN, &us->dflags), - delay_use * HZ); + /* For bulk-only devices, determine the max LUN value */ + if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) { + mutex_lock(&us->dev_mutex); + us->max_lun = usb_stor_Bulk_max_lun(us); + mutex_unlock(&us->dev_mutex); } + scsi_scan_host(us_to_host(us)); + dev_dbg(dev, "scan complete\n"); - /* If the device is still connected, perform the scanning */ - if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) { - - /* For bulk-only devices, determine the max LUN value */ - if (us->protocol == USB_PR_BULK && - !(us->fflags & US_FL_SINGLE_LUN)) { - mutex_lock(&us->dev_mutex); - us->max_lun = usb_stor_Bulk_max_lun(us); - mutex_unlock(&us->dev_mutex); - } - scsi_scan_host(us_to_host(us)); - dev_dbg(dev, "scan complete\n"); - - /* Should we unbind if no devices were detected? */ - } + /* Should we unbind if no devices were detected? 
*/ usb_autopm_put_interface(us->pusb_intf); - complete_and_exit(&us->scanning_done, 0); + clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags); } static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) @@ -915,7 +907,7 @@ int usb_stor_probe1(struct us_data **pus, init_completion(&us->cmnd_ready); init_completion(&(us->notify)); init_waitqueue_head(&us->delay_wait); - init_completion(&us->scanning_done); + INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork); /* Associate the us_data structure with the USB device */ result = associate_dev(us, intf); @@ -946,7 +938,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1); /* Second part of general USB mass-storage probing */ int usb_stor_probe2(struct us_data *us) { - struct task_struct *th; int result; struct device *dev = &us->pusb_intf->dev; @@ -987,20 +978,14 @@ int usb_stor_probe2(struct us_data *us) goto BadDevice; } - /* Start up the thread for delayed SCSI-device scanning */ - th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan"); - if (IS_ERR(th)) { - dev_warn(dev, - "Unable to start the device-scanning thread\n"); - complete(&us->scanning_done); - quiesce_and_remove_host(us); - result = PTR_ERR(th); - goto BadDevice; - } - + /* Submit the delayed_work for SCSI-device scanning */ usb_autopm_get_interface_no_resume(us->pusb_intf); - wake_up_process(th); + set_bit(US_FLIDX_SCAN_PENDING, &us->dflags); + if (delay_use > 0) + dev_dbg(dev, "waiting for device to settle before scanning\n"); + queue_delayed_work(system_freezable_wq, &us->scan_dwork, + delay_use * HZ); return 0; /* We come here if there are any problems */ @@ -1073,6 +1058,7 @@ static struct usb_driver usb_storage_driver = { .id_table = usb_storage_usb_ids, .supports_autosuspend = 1, .soft_unbind = 1, + .no_dynamic_id = 1, }; static int __init usb_stor_init(void) diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h index 7b0f2113632..75f70f04f37 100644 --- a/drivers/usb/storage/usb.h +++ b/drivers/usb/storage/usb.h @@ -47,6 +47,7 @@ #include #include #include +#include #include struct us_data; @@ -72,7 +73,7 @@ struct us_unusual_dev { #define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */ #define US_FLIDX_RESETTING 4 /* device reset in progress */ #define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */ -#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */ +#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */ #define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */ #define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */ @@ -147,8 +148,8 @@ struct us_data { /* mutual exclusion and synchronization structures */ struct completion cmnd_ready; /* to sleep thread on */ struct completion notify; /* thread begin/end */ - wait_queue_head_t delay_wait; /* wait during scan, reset */ - struct completion scanning_done; /* wait for scan thread */ + wait_queue_head_t delay_wait; /* wait during reset */ + struct delayed_work scan_dwork; /* for async scanning */ /* subdriver information */ void *extra; /* Any extra data */ diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c index b96927914f8..a9b5f2e2918 100644 --- a/drivers/usb/storage/usual-tables.c +++ b/drivers/usb/storage/usual-tables.c @@ -46,6 +46,20 @@ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \ .driver_info = ((useType)<<24) } +/* Define the device is matched with Vendor ID and interface descriptors */ +#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \ + vendorName, productName, useProtocol, useTransport, \ + initFunction, 
flags) \ +{ \ + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \ + | USB_DEVICE_ID_MATCH_VENDOR, \ + .idVendor = (id_vendor), \ + .bInterfaceClass = (cl), \ + .bInterfaceSubClass = (sc), \ + .bInterfaceProtocol = (pr), \ + .driver_info = (flags) \ +} + struct usb_device_id usb_storage_usb_ids[] = { # include "unusual_devs.h" { } /* Terminating entry */ @@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids); #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV +#undef UNUSUAL_VENDOR_INTF /* diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 2babcd4fbfc..86685e99498 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb) dev_err(dev, "NEEP: URB error %d\n", urb->status); } result = usb_submit_urb(urb, GFP_ATOMIC); - if (result < 0) { + if (result < 0 && result != -ENODEV && result != -EPERM) { + /* ignoring unrecoverable errors */ dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", result); goto error; diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c index 697e56a5bcd..47146c89433 100644 --- a/drivers/uwb/neh.c +++ b/drivers/uwb/neh.c @@ -106,6 +106,7 @@ struct uwb_rc_neh { u8 evt_type; __le16 evt; u8 context; + u8 completed; uwb_rc_cmd_cb_f cb; void *arg; @@ -408,6 +409,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size struct device *dev = &rc->uwb_dev.dev; struct uwb_rc_neh *neh; struct uwb_rceb *notif; + unsigned long flags; if (rceb->bEventContext == 0) { notif = kmalloc(size, GFP_ATOMIC); @@ -421,7 +423,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size } else { neh = uwb_rc_neh_lookup(rc, rceb); if (neh) { - del_timer_sync(&neh->timer); + spin_lock_irqsave(&rc->neh_lock, flags); + /* to guard against a timeout */ + neh->completed = 1; + del_timer(&neh->timer); + spin_unlock_irqrestore(&rc->neh_lock, flags); uwb_rc_neh_cb(neh, rceb, size); } else dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", @@ -567,6 +573,10 @@ static void uwb_rc_neh_timer(unsigned long arg) unsigned long flags; spin_lock_irqsave(&rc->neh_lock, flags); + if (neh->completed) { + spin_unlock_irqrestore(&rc->neh_lock, flags); + return; + } if (neh->context) __uwb_rc_neh_rm(rc, neh); else diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e224a92baa1..f27482630ba 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -305,7 +305,8 @@ static void handle_rx(struct vhost_net *net) .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; - int err, headcount, mergeable; + int err, mergeable; + s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; /* TODO: check that we are running from vhost_worker? 
*/ diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ea966b35635..e3fac280e46 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -217,6 +217,8 @@ static int vhost_worker(void *data) if (work) { __set_current_state(TASK_RUNNING); work->fn(work); + if (need_resched()) + schedule(); } else schedule(); @@ -984,7 +986,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, } _iov = iov + ret; size = reg->memory_size - addr + reg->guest_phys_addr; - _iov->iov_len = min((u64)len, size); + _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) (reg->userspace_addr + addr - reg->guest_phys_addr); s += size; diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 4484c721f0f..c2ceae4bb6c 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -1085,7 +1085,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg) */ lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL); - sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL); + sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR); lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0); if (sinfo->atmel_lcdfb_power_control) sinfo->atmel_lcdfb_power_control(0); diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index d2a96a421ff..ee0f001cb4d 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -793,7 +793,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8860_i2c_resume(struct i2c_client *client) { - adp8860_set_bits(client, ADP8860_MDCR, NSTBY); + adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN); return 0; } diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 05a8832bb3e..bc0503acfb3 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -968,7 +968,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) static int adp8870_i2c_resume(struct i2c_client *client) { - adp8870_set_bits(client, ADP8870_MDCR, NSTBY); + adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN); return 0; } diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 772f6015219..6f54f7436a9 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi) } #else #define tosa_lcd_suspend NULL -#define tosa_lcd_reume NULL +#define tosa_lcd_resume NULL #endif static struct spi_driver tosa_lcd_driver = { diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 8745637e4b7..6b4fb5cf153 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work) struct vc_data *vc = NULL; int c; int mode; + int ret; + + /* FIXME: we should sort out the unbind locking instead */ + /* instead we just fail to flash the cursor if we can't get + * the lock instead of blocking fbcon deinit */ + ret = console_trylock(); + if (ret == 0) + return; - console_lock(); if (ops && ops->currcon != -1) vc = vc_cons[ops->currcon].d; @@ -523,6 +530,33 @@ static int search_for_mapped_con(void) return retval; } +static int do_fbcon_takeover(int show_logo) +{ + int err, i; + + if (!num_registered_fb) + return -ENODEV; + + if (!show_logo) + logo_shown = FBCON_LOGO_DONTSHOW; + + for (i = 
first_fb_vc; i <= last_fb_vc; i++) + con2fb_map[i] = info_idx; + + err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, + fbcon_is_default); + + if (err) { + for (i = first_fb_vc; i <= last_fb_vc; i++) + con2fb_map[i] = -1; + info_idx = -1; + } else { + fbcon_has_console_bind = 1; + } + + return err; +} + static int fbcon_takeover(int show_logo) { int err, i; @@ -809,6 +843,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, * * Maps a virtual console @unit to a frame buffer device * @newidx. + * + * This should be called with the console lock held. */ static int set_con2fb_map(int unit, int newidx, int user) { @@ -826,7 +862,7 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { info_idx = newidx; - return fbcon_takeover(0); + return do_fbcon_takeover(0); } if (oldidx != -1) @@ -834,7 +870,6 @@ static int set_con2fb_map(int unit, int newidx, int user) found = search_fb_in_map(newidx); - console_lock(); con2fb_map[unit] = newidx; if (!err && !found) err = con2fb_acquire_newinfo(vc, info, unit, oldidx); @@ -861,7 +896,6 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_fb_in_map(info_idx)) info_idx = newidx; - console_unlock(); return err; } @@ -984,7 +1018,7 @@ static const char *fbcon_startup(void) } /* Setup default font */ - if (!p->fontdata) { + if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) font = get_default_font(info->var.xres, info->var.yres, @@ -994,6 +1028,8 @@ static const char *fbcon_startup(void) vc->vc_font.height = font->height; vc->vc_font.data = (void *)(p->fontdata = font->data); vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ + } else { + p->fontdata = vc->vc_font.data; } cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); @@ -1153,9 +1189,9 @@ static void fbcon_init(struct vc_data *vc, int init) ops->p = &fb_display[fg_console]; } -static void fbcon_free_font(struct display *p) +static void fbcon_free_font(struct display *p, bool freefont) { - if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) + if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); p->fontdata = NULL; p->userfont = 0; @@ -1167,8 +1203,8 @@ static void fbcon_deinit(struct vc_data *vc) struct fb_info *info; struct fbcon_ops *ops; int idx; + bool free_font = true; - fbcon_free_font(p); idx = con2fb_map[vc->vc_num]; if (idx == -1) @@ -1179,6 +1215,8 @@ static void fbcon_deinit(struct vc_data *vc) if (!info) goto finished; + if (info->flags & FBINFO_MISC_FIRMWARE) + free_font = false; ops = info->fbcon_par; if (!ops) @@ -1190,6 +1228,10 @@ static void fbcon_deinit(struct vc_data *vc) ops->flags &= ~FBCON_FLAGS_INIT; finished: + fbcon_free_font(p, free_font); + if (free_font) + vc->vc_font.data = NULL; + if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -2971,7 +3013,7 @@ static int fbcon_unbind(void) { int ret; - ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, + ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, fbcon_is_default); if (!ret) @@ -2986,6 +3028,7 @@ static inline int fbcon_unbind(void) } #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ +/* called with console_lock held */ static int fbcon_fb_unbind(int idx) { int i, new_idx = -1, ret = 0; @@ -3012,6 +3055,7 @@ static int fbcon_fb_unbind(int idx) return ret; } +/* called with console_lock held */ static int fbcon_fb_unregistered(struct fb_info *info) 
{ int i, idx; @@ -3044,11 +3088,12 @@ static int fbcon_fb_unregistered(struct fb_info *info) primary_device = -1; if (!num_registered_fb) - unregister_con_driver(&fb_con); + do_unregister_con_driver(&fb_con); return 0; } +/* called with console_lock held */ static void fbcon_remap_all(int idx) { int i; @@ -3093,6 +3138,7 @@ static inline void fbcon_select_primary(struct fb_info *info) } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ +/* called with console_lock held */ static int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; @@ -3109,7 +3155,7 @@ static int fbcon_fb_registered(struct fb_info *info) } if (info_idx != -1) - ret = fbcon_takeover(1); + ret = do_fbcon_takeover(1); } else { for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map_boot[i] == idx) @@ -3245,6 +3291,7 @@ static int fbcon_event_notify(struct notifier_block *self, ret = fbcon_fb_unregistered(info); break; case FB_EVENT_SET_CONSOLE_MAP: + /* called with console lock held */ con2fb = event->data; ret = set_con2fb_map(con2fb->console - 1, con2fb->framebuffer, 1); diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 6b5b59522b5..798196cf96c 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1154,8 +1154,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, event.data = &con2fb; if (!lock_fb_info(info)) return -ENODEV; + console_lock(); event.info = info; ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); + console_unlock(); unlock_fb_info(info); break; case FBIOBLANK: @@ -1345,15 +1347,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long off; + unsigned long mmio_pgoff; unsigned long start; u32 len; if (!info) return -ENODEV; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1365,33 +1364,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* frame buffer memory */ + /* + * Ugh. This can be either the frame buffer mapping, or + * if pgoff points past it, the mmio mapping. 
+ */ start = info->fix.smem_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); - if (off >= len) { - /* memory mapped io */ - off -= len; - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } + len = info->fix.smem_len; + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (vma->vm_pgoff >= mmio_pgoff) { + vma->vm_pgoff -= mmio_pgoff; start = info->fix.mmio_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); + len = info->fix.mmio_len; } mutex_unlock(&info->mm_lock); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - /* This is an IO map - tell maydump to skip this VMA */ - vma->vm_flags |= VM_IO | VM_RESERVED; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, off); - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - return 0; + fb_pgprotect(file, vma, start); + + return vm_iomap_memory(vma, start, len); } static int @@ -1628,7 +1618,9 @@ static int do_register_framebuffer(struct fb_info *fb_info) event.info = fb_info; if (!lock_fb_info(fb_info)) return -ENODEV; + console_lock(); fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); + console_unlock(); unlock_fb_info(fb_info); return 0; } @@ -1644,13 +1636,16 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) if (!lock_fb_info(fb_info)) return -ENODEV; + console_lock(); event.info = fb_info; ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); + console_unlock(); unlock_fb_info(fb_info); if (ret) return -EINVAL; + unlink_framebuffer(fb_info); if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) kfree(fb_info->pixmap.addr); @@ -1658,15 +1653,32 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) registered_fb[i] = NULL; num_registered_fb--; fb_cleanup_device(fb_info); - device_destroy(fb_class, MKDEV(FB_MAJOR, i)); event.info = fb_info; + console_lock(); fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); + console_unlock(); /* this may free fb info */ put_fb_info(fb_info); return 0; } +int unlink_framebuffer(struct fb_info *fb_info) +{ + int i; + + i = fb_info->node; + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) + return -EINVAL; + + if (fb_info->dev) { + device_destroy(fb_class, MKDEV(FB_MAJOR, i)); + fb_info->dev = NULL; + } + return 0; +} +EXPORT_SYMBOL(unlink_framebuffer); + void remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary) { @@ -1815,11 +1827,8 @@ int fb_new_modelist(struct fb_info *info) err = 1; if (!list_empty(&info->modelist)) { - if (!lock_fb_info(info)) - return -ENODEV; event.info = info; err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); - unlock_fb_info(info); } return err; diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 67afa9c2289..303fb9f35b2 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device, if (i * sizeof(struct fb_videomode) != count) return -EINVAL; + if (!lock_fb_info(fb_info)) + return -ENODEV; console_lock(); list_splice(&fb_info->modelist, &old_list); fb_videomode_to_modelist((const struct fb_videomode *)buf, i, @@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device, fb_destroy_modelist(&old_list); console_unlock(); + unlock_fb_info(fb_info); return 0; } diff --git 
a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c index 6caaf8ec5df..29e50997ca7 100644 --- a/drivers/video/msm/external_common.c +++ b/drivers/video/msm/external_common.c @@ -1212,6 +1212,7 @@ void hdmi_common_init_panel_info(struct msm_panel_info *pinfo) pinfo->xres = timing->active_h; pinfo->yres = timing->active_v; pinfo->clk_rate = timing->pixel_freq*1000; + pinfo->frame_rate = 60; pinfo->lcdc.h_back_porch = timing->back_porch_h; pinfo->lcdc.h_front_porch = timing->front_porch_h; diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h index 1172ab09fad..903d09e93aa 100644 --- a/drivers/video/msm/mdp4.h +++ b/drivers/video/msm/mdp4.h @@ -456,8 +456,7 @@ static inline void mdp4_overlay_dtv_wait_for_ov(struct msm_fb_data_type *mfd, #endif int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req); -int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, - struct file **pp_src_file); +int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req); struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer, int req_share); void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe); diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c index 73f25b8fb47..4a5a1f48136 100644 --- a/drivers/video/msm/mdp4_overlay.c +++ b/drivers/video/msm/mdp4_overlay.c @@ -1879,39 +1879,57 @@ static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer, } static int get_img(struct msmfb_data *img, struct fb_info *info, - unsigned long *start, unsigned long *len, struct file **pp_file) + unsigned long *start, unsigned long *len, struct file **srcp_file, + struct ion_handle **srcp_ihdl) { +#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; +#endif + struct file *file; int put_needed, ret = 0, fb_num; - struct file *file; #ifdef CONFIG_ANDROID_PMEM unsigned long vstart; #endif - if (img->flags & MDP_BLIT_SRC_GEM) { - *pp_file = NULL; + *srcp_file = NULL; return kgsl_gem_obj_addr(img->memory_id, (int) img->priv, start, len); } + if (img->flags & MDP_MEMORY_ID_TYPE_FB) { + file = fget_light(img->memory_id, &put_needed); + if (file == NULL) + return -EINVAL; + + if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { + fb_num = MINOR(file->f_dentry->d_inode->i_rdev); + if (get_fb_phys_info(start, len, fb_num)) + ret = -1; + else + *srcp_file = file; + } else + ret = -1; + if (ret) + fput_light(file, put_needed); + return ret; + } + +#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION + *srcp_ihdl = ion_import_fd(mfd->client, img->memory_id); + if (IS_ERR_OR_NULL(*srcp_ihdl)) + return PTR_ERR(*srcp_ihdl); + if (!ion_phys(mfd->client, *srcp_ihdl, start, (size_t *) len)) + return 0; + else + return -EINVAL; +#endif #ifdef CONFIG_ANDROID_PMEM - if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file)) + if (!get_pmem_file(img->memory_id, start, &vstart, + len, srcp_file)) return 0; + else + return -EINVAL; #endif - file = fget_light(img->memory_id, &put_needed); - if (file == NULL) - return -1; - - if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { - fb_num = MINOR(file->f_dentry->d_inode->i_rdev); - if (get_fb_phys_info(start, len, fb_num)) - ret = -1; - else - *pp_file = file; - } else - ret = -1; - if (ret) - fput_light(file, put_needed); - return ret; } int mdp4_overlay_3d(struct fb_info *info, struct msmfb_overlay_3d *req) @@ -2460,8 +2478,7 @@ int mdp4_overlay_snapshot(struct msmfb_overlay_data *req, uint32_t phy_addr, } 
#endif -int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, - struct file **pp_src_file) +int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; struct msmfb_data *img; @@ -2469,7 +2486,9 @@ int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, struct mdp4_pipe_desc *pd; ulong start, addr; ulong len = 0; - struct file *p_src_file = 0; + struct file *srcp0_file = NULL; + struct ion_handle *srcp0_ihdl = NULL; + int ret = 0; if (mfd == NULL) return -ENODEV; @@ -2503,7 +2522,7 @@ int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, pd->player = pipe; /* keep */ img = &req->data; - get_img(img, info, &start, &len, &p_src_file); + get_img(img, info, &start, &len, &srcp0_file, &srcp0_ihdl); if (len == 0) { if (virtualfb3d.is_3d && pipe->pipe_type == OVERLAY_TYPE_VIDEO) atomic_set(&ov_play, 0); @@ -2512,9 +2531,9 @@ int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, complete(&ov_comp); PR_DISP_ERR("%s: pmem Error\n", __func__); - return -1; + ret = -1; + goto end; } - *pp_src_file = p_src_file; addr = start + img->offset; pipe->srcp0_addr = addr; @@ -2612,7 +2631,7 @@ int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, if (atomic_read(&ov_unset)) complete(&ov_comp); - return 0; + goto end; } #ifdef CONFIG_FB_MSM_MIPI_DSI if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) { @@ -2653,7 +2672,17 @@ int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req, atomic_set(&mfd->mdp_pdata->dcr_panel_pinfo->video_mode, 1); } } - return 0; + +end: +#ifdef CONFIG_ANDROID_PMEM + if (srcp0_file) + put_pmem_file(srcp0_file); +#endif +#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION + if (!IS_ERR_OR_NULL(srcp0_ihdl)) + ion_free(mfd->client, srcp0_ihdl); +#endif + return ret; } /*---------------------------------------------------------------------------*/ diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 28607cc81ec..924a947aa72 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -1398,6 +1398,7 @@ static int msm_fb_register(struct msm_fb_data_type *mfd) mfd->var_xres = panel_info->xres; mfd->var_yres = panel_info->yres; + mfd->var_frame_rate = panel_info->frame_rate; var->pixclock = mfd->panel_info.clk_rate; mfd->var_pixclock = var->pixclock; @@ -1977,6 +1978,27 @@ static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) return 0; } +int msm_fb_check_frame_rate(struct msm_fb_data_type *mfd + , struct fb_info *info) +{ + int panel_height, panel_width, var_frame_rate, fps_mod; + struct fb_var_screeninfo *var = &info->var; + fps_mod = 0; + if ((mfd->panel_info.type == DTV_PANEL) || + (mfd->panel_info.type == HDMI_PANEL)) { + panel_height = var->yres + var->upper_margin + + var->vsync_len + var->lower_margin; + panel_width = var->xres + var->right_margin + + var->hsync_len + var->left_margin; + var_frame_rate = ((var->pixclock)/(panel_height * panel_width)); + if (mfd->var_frame_rate != var_frame_rate) { + fps_mod = 1; + mfd->var_frame_rate = var_frame_rate; + } + } + return fps_mod; +} + static int msm_fb_set_par(struct fb_info *info) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; @@ -2018,7 +2040,8 @@ static int msm_fb_set_par(struct fb_info *info) (mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) || (mfd->var_pixclock != var->pixclock) || (mfd->var_xres != var->xres) || - (mfd->var_yres != var->yres)))) { + 
(mfd->var_yres != var->yres) || + (msm_fb_check_frame_rate(mfd, info))))) { mfd->var_xres = var->xres; mfd->var_yres = var->yres; mfd->var_pixclock = var->pixclock; @@ -2958,7 +2981,6 @@ static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp) int ret; struct msmfb_overlay_data req; struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par; - struct file *p_src_file = 0; struct msm_fb_panel_data *pdata; static uint64_t ovp_dt; static int64_t ovp_count; @@ -3014,7 +3036,7 @@ static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp) } } - ret = mdp4_overlay_play(info, &req, &p_src_file); + ret = mdp4_overlay_play(info, &req); #if defined (CONFIG_FB_MSM_MDP_ABL) @@ -3036,13 +3058,9 @@ static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp) jiffies + ((1000 * HZ) / 1000); add_timer(&mfd->msmfb_no_update_notify_timer); mutex_unlock(&msm_fb_notify_update_sem); -#endif #endif -#ifdef CONFIG_ANDROID_PMEM - if (p_src_file) - put_pmem_file(p_src_file); #endif return ret; @@ -3576,7 +3594,9 @@ struct platform_device *msm_fb_add_device(struct platform_device *pdev) mfd->fb_page = fb_num; mfd->index = fbi_list_index; mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE; - +#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION + mfd->client = msm_ion_client_create(-1, pdev->name); +#endif /* link to the latest pdev */ mfd->pdev = this_dev; diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h index 39c2647ee5b..44ffb794c6b 100644 --- a/drivers/video/msm/msm_fb.h +++ b/drivers/video/msm/msm_fb.h @@ -51,6 +51,7 @@ #include #include +#include #ifdef CONFIG_HAS_EARLYSUSPEND #include @@ -149,6 +150,7 @@ struct msm_fb_data_type { __u32 var_xres; __u32 var_yres; __u32 var_pixclock; + __u32 var_frame_rate; #ifdef MSM_FB_ENABLE_DBGFS struct dentry *sub_dir; @@ -193,6 +195,7 @@ struct msm_fb_data_type { #if defined CONFIG_FB_MSM_MDP_ABL boolean enable_abl; #endif + struct ion_client *client; }; struct dentry *msm_fb_get_debugfs_root(void); @@ -223,5 +226,7 @@ extern int msmfb_get_fb_area(void); void fill_black_screen(void); void unfill_black_screen(void); +int msm_fb_check_frame_rate(struct msm_fb_data_type *mfd, + struct fb_info *info); #endif /* MSM_FB_H */ diff --git a/drivers/video/msm/msm_fb_def.h b/drivers/video/msm/msm_fb_def.h index 733e80c204b..1eb326bc02c 100644 --- a/drivers/video/msm/msm_fb_def.h +++ b/drivers/video/msm/msm_fb_def.h @@ -102,11 +102,11 @@ typedef unsigned int boolean; #define MSM_FB_ENABLE_DBGFS #define FEATURE_MDDI -#if defined(CONFIG_FB_MSM_8X60_DEFAULT_DEPTH_RGB565) +#if defined(CONFIG_FB_MSM_DEFAULT_DEPTH_RGB565) #define MSMFB_DEFAULT_TYPE MDP_RGB_565 -#elif defined(CONFIG_FB_MSM_8X60_DEFAULT_DEPTH_ARGB8888) +#elif defined(CONFIG_FB_MSM_DEFAULT_DEPTH_ARGB8888) #define MSMFB_DEFAULT_TYPE MDP_ARGB_8888 -#elif defined(CONFIG_FB_MSM_8X60_DEFAULT_DEPTH_RGBA8888) +#elif defined(CONFIG_FB_MSM_DEFAULT_DEPTH_RGBA8888) #define MSMFB_DEFAULT_TYPE MDP_RGBA_8888 #else #define MSMFB_DEFAULT_TYPE MDP_RGB_565 diff --git a/drivers/video/msm/msm_fb_panel.h b/drivers/video/msm/msm_fb_panel.h index 717488aecb7..3ecdd24e30b 100644 --- a/drivers/video/msm/msm_fb_panel.h +++ b/drivers/video/msm/msm_fb_panel.h @@ -167,6 +167,7 @@ struct msm_panel_info { __u32 clk_min; __u32 clk_max; __u32 frame_count; + __u32 frame_rate; bool is_3d_panel; diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 0b2f2dd4141..2f902900c80 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -365,7 +365,8 @@ static void 
mxsfb_disable_controller(struct fb_info *fb_info) loop--; } - writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR); + reg = readl(host->base + LCDC_VDCTRL4); + writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4); clk_disable(host->clk); diff --git a/drivers/video/offb.c b/drivers/video/offb.c index cb163a5397b..3251a0236d5 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -100,36 +100,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; - int i, depth; - u32 *pal = info->pseudo_palette; - - depth = info->var.bits_per_pixel; - if (depth == 16) - depth = (info->var.green.length == 5) ? 15 : 16; - - if (regno > 255 || - (depth == 16 && regno > 63) || - (depth == 15 && regno > 31)) - return 1; - - if (regno < 16) { - switch (depth) { - case 15: - pal[regno] = (regno << 10) | (regno << 5) | regno; - break; - case 16: - pal[regno] = (regno << 11) | (regno << 5) | regno; - break; - case 24: - pal[regno] = (regno << 16) | (regno << 8) | regno; - break; - case 32: - i = (regno << 8) | regno; - pal[regno] = (i << 16) | i; - break; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR) { + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= 16) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + mask <<= info->var.transp.offset; + value |= mask; } + pal[regno] = value; + return 0; } + if (regno > 255) + return -EINVAL; + red >>= 8; green >>= 8; blue >>= 8; @@ -381,7 +377,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) { - unsigned long res_size = pitch * height * (depth + 7) / 8; + unsigned long res_size = pitch * height; struct offb_par *par = &default_par; unsigned long res_start = address; struct fb_fix_screeninfo *fix; diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index b0555f4f0a7..fadd6a0836c 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -29,6 +29,7 @@ #include #include #include +#include #include
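The offb_setcolreg() rewrite above composes a truecolor pseudo-palette entry directly from the bitfield descriptions instead of special-casing each depth: each 16-bit colour component is truncated to its field length, shifted to its field offset, and any transparency bits are forced to all-ones so the entry is opaque. The following standalone sketch shows just that packing; the cut-down field struct and pack_truecolor() are illustrative stand-ins for fb_var_screeninfo and the in-kernel code.

#include <stdint.h>
#include <stdio.h>

/* Cut-down stand-in for the offset/length pairs in fb_var_screeninfo. */
struct field {
	unsigned offset;
	unsigned length;
};

/* Pack 16-bit-per-channel colour into one truecolor palette value:
 * truncate each channel to its field width, shift it into place, and set
 * every transparency bit so the entry is fully opaque. */
static uint32_t pack_truecolor(uint16_t red, uint16_t green, uint16_t blue,
			       struct field r, struct field g, struct field b,
			       struct field transp)
{
	uint32_t value;

	value  = (uint32_t)(red   >> (16 - r.length)) << r.offset;
	value |= (uint32_t)(green >> (16 - g.length)) << g.offset;
	value |= (uint32_t)(blue  >> (16 - b.length)) << b.offset;

	if (transp.length > 0) {
		uint32_t mask = (1u << transp.length) - 1;

		value |= mask << transp.offset;
	}
	return value;
}

int main(void)
{
	/* Typical ARGB8888 layout: red/green/blue at offsets 16/8/0, alpha at 24. */
	struct field r = { 16, 8 }, g = { 8, 8 }, b = { 0, 8 }, a = { 24, 8 };

	/* Prints 0xffff8000: full red, half green, no blue, alpha forced on. */
	printf("0x%08x\n", pack_truecolor(0xffff, 0x8000, 0x0000, r, g, b, a));
	return 0;
}

Driving the packing from the reported offsets and lengths is what lets the patch delete the per-depth switch and handle any truecolor layout the firmware describes.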